repo_name | path | copies | size | content | license
---|---|---|---|---|---|
marcocaccin/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 6 | 39791 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# accessing feature_importances_ before fit should raise ValueError
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2*ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=1,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19, once transform support is removed
# from estimators.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with y passed as a column vector (2-D array of shape (n_samples, 1)).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise; otherwise the warning gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test that oob_improvement_ has the correct shape (plus a hard-coded regression test).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test that accessing oob_improvement_ raises AttributeError when subsample=1.0 (no OOB data).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 10 lines for iterations 1-10, then one line every 10 iterations (20-100): 9 more lines
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0, max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
if issubclass(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
ulisespereira/LearningSequences | fixedConnectivity/sequences.py | 1 | 5596 | import numpy as np
from scipy import sparse
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import math as mt
# piecewise-linear transfer function: 0 below theta, slope nu between theta and uc, saturated at nu*(uc-theta) above uc
def phi(x,theta,uc):
myphi=nu*(x-theta)
myphi[x>uc]=nu*(uc-theta)
myphi[theta>x]=0.
return myphi
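# Illustrative example (not part of the original script), using the values set
# further below (nu=2., theta=0., uc=1/nu=0.5):
# phi(np.array([-1., 0.2, 1.]), 0., 0.5) -> array([0. , 0.4, 1. ])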
def phi_brunel(x,theta,uc):
myphi=nu_brunel*((x-theta)/uc)**2
myphi[x>(uc+theta)]=2*nu_brunel*np.sqrt((x[x>(uc+theta)]-theta)/uc-3./4.)
myphi[theta>x]=0.
return myphi
def phi_tanh(x):
return 0.5*(1+np.tanh(a1*(x+b1)))
# this is the connectivity matrix of the network
def net(wmax,sdel,n,k):
mysdel=np.concatenate([sdel[i]*np.ones(n/k) for i in range(k)])
mysdel=mysdel[0:-1]
mywmax=np.concatenate([wmax[i]*np.ones(n/k) for i in range(k)])
diagonals=[mywmax,mysdel]
return sparse.diags(diagonals,[0,-1])
def net_matrix(wmax_min,wmax_max,sdel_min,sdel_max,n,k):
sdel=np.linspace(sdel_min,sdel_max,k)
wmax=np.linspace(wmax_min,wmax_max,k)
mysdel=np.concatenate([sdel[i]*np.ones(n/k) for i in range(k)])
mysdel=mysdel[0:-1]
mywmax=np.concatenate([wmax[i]*np.ones(n/k) for i in range(k)])
diagonals=[mywmax,mysdel]
vuelta=np.zeros((n,n))
vuelta[0,-1]=0.1
return np.diag(diagonals[0],0)+np.diag(diagonals[1],-1)-w_inh*nu*np.ones((n,n))#+vuelta
#fields approximations
def field_tanh(x,t):
return net(wmax,sdel,n,k).dot(phi_tanh(x))-x-w_inh*np.dot(r1_matrix,phi_tanh(x))
def field_pw(x,t):
return net(wmax,sdel,n,k).dot(phi(x,theta,uc))-x-w_inh*np.dot(r1_matrix,phi(x,theta,uc))
def field_brunel(x,t):
return net(wmax,sdel,n,k).dot(phi_brunel(x,theta,uc))-x-w_inh*np.dot(r1_matrix,phi_brunel(x,theta,uc))
#field true
def field_true_tanh(x,t):
n=len(x)
thefield=np.zeros(n)
thefield[0:n-1]=net(wmax,sdel,n-1,k).dot(phi_tanh(x[0:n-1]))-x[0:n-1]-w_inh*x[-1]*np.ones(n-1)
thefield[-1]=2.*(-x[-1]+np.ones(n-1).dot(phi_tanh(x[0:n-1])))
return thefield
def field_true_pw(x,t):
n=len(x)
thefield=np.zeros(n)
thefield[0:n-1]=net(wmax,sdel,n-1,k).dot(phi(x[0:n-1],theta,uc))-x[0:n-1]-w_inh*x[-1]*np.ones(n-1)
thefield[-1]=2.*(-x[-1]+np.ones(n-1).dot(phi(x[0:n-1],theta,uc)))
return thefield
def field_true_brunel(x,t):
n=len(x)
thefield=np.zeros(n)
thefield[0:n-1]=net(wmax,sdel,n-1,k).dot(phi_brunel(x[0:n-1],theta_brunel,uc_brunel))-x[0:n-1]-w_inh*x[-1]*np.ones(n-1)
thefield[-1]=2.*(-x[-1]+np.ones(n-1).dot(phi_brunel(x[0:n-1],theta_brunel,uc_brunel)))
return thefield
def rk4(f,y0,dt,T):
mysol=[]
mytime=[]
t=0
un=y0
mytime.append(t)
mysol.append(un)
while t<=T:
k1=f(un,t)
k2=f(un+(dt/2)*k1,t+dt/2)
k3=f(un+(dt/2)*k2,t+dt/2)
k4=f(un+dt*k3,t+dt)
un=un+(dt/6)*(k1+2*k2+2*k3+k4)
t=t+dt
mysol.append(un)
mytime.append(t)
print(t)
return np.array(mysol),mytime
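# Illustrative usage (a sketch, not part of the original script):
# sol, ts = rk4(lambda x, t: -x, np.ones(3), 0.1, 5.)
# integrates dx/dt = -x with step 0.1 up to t=5 and returns (solution array, time list).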
n=20
k=1
w_i=3.8
w_inh=w_i/n
nu=2.
nu_brunel=0.4*nu
theta=-0.0
theta_brunel=-0.1
uc=1/nu
uc_brunel=uc
sdelmax=0.6
sdelmin=0.6
wmaxmin=0.05
wmaxmax=0.05
#print(1./(nu+sdel))
a1=6.
b1=-0.25
sdel=np.linspace(sdelmin,sdelmax,k)
wmax=np.linspace(sdelmin,sdelmax,k)
r1_matrix=np.ones((n,n))#np.outer(np.ones(n),np.random.normal(1,sigma_wi/n,n))
y0=theta*np.ones(n)
y0[0]=1.
y0_true=np.zeros(n+1)
y0_true[0]=1.
#approx
ytanh,timetanh=rk4(field_pw,y0,0.1,50)
ypw,timepw=rk4(field_tanh,y0,0.1,50)
ybrunel,timebrunel=rk4(field_brunel,y0,0.1,50)
#true
ytanh_true,timetanh_true=rk4(field_true_pw,y0_true,0.1,50)
ypw_true,timepw_true=rk4(field_true_tanh,y0_true,0.1,50)
ybrunel_true,timebrunel_true=rk4(field_true_brunel,y0_true,0.1,50)
#figure
figure=plt.figure()
#connectivity matrix
W01=net_matrix(wmaxmin,wmaxmax,sdelmin,sdelmax,n,k)
matrix_AL=figure.add_subplot(221)
mymatrix=matrix_AL.matshow(W01)
cbaxes = figure.add_axes([0.05, 0.51, 0.03, 0.45])
figure.colorbar(mymatrix,cax=cbaxes)
matrix_AL.set_xlabel('connectivity matrix')
#transfer function
myu=np.linspace(-.5,1.5,200)
tf=figure.add_subplot(222)
l1=tf.plot(myu,phi_tanh(myu),'b')
l2=tf.plot(myu,phi(myu,theta,uc),'g')
l3=tf.plot(myu,phi_brunel(myu,theta_brunel,uc_brunel),'r')
tf.set_xlabel('Current')
tf.set_ylabel('Transfer function value ')
tf.legend(('Piecewise','Sigmoidal','Brunel'),'upper left')
#tf.set_ylim([0,4.2])
#dynamics
dynamics=figure.add_subplot(223)
dynamics.plot(timetanh_true,ytanh_true[:,0:n],'b')
dynamics.plot(timepw_true,ypw_true[:,0:n],'g')
dynamics.plot(timebrunel_true,ybrunel_true[:,0:n],'r')
dynamics.set_xlim([0,50.])
dynamics.set_xlabel('Time')
dynamics.set_ylabel('Exc.Pop. Current')
#dynamics
dynamics2=figure.add_subplot(224)
dynamics2.plot(timetanh_true,ytanh_true[:,0:n],'b')
dynamics2.plot(timepw_true,ypw_true[:,0:n],'g')
dynamics2.plot(timebrunel_true,ybrunel_true[:,0:n],'r')
dynamics2.set_xlim([0,15.])
dynamics2.set_xlabel('Time')
dynamics2.set_ylabel('Exc.Pop. Current')
plt.show()
figure2=plt.figure()
#dynamics
dynamics=figure2.add_subplot(121)
dynamics.plot(timetanh_true,ytanh_true[:,0:n],'b',linewidth=2)
#dynamics.plot(timepw_true,ypw_true[:,0:n],'g')
dynamics.plot(timetanh,ytanh[:,0:n],'r--',linewidth=2)
#dynamics.plot(timepw,ypw[:,0:n],'g--')
dynamics.set_xlim([0,50.])
dynamics.set_xlabel('Time')
dynamics.set_ylabel('Exc.Pop. Current')
#dynamics.legend(('Complete network','Approximation'),'upper left')
#dynamics
dynamics2=figure2.add_subplot(122)
dynamics2.plot(timetanh_true,ytanh_true[:,0:n],'b',linewidth=2,label='Complete network')
#dynamics2.plot(timepw_true,ypw_true[:,0:n],'g')
dynamics2.plot(timetanh,ytanh[:,0:n],'r--',linewidth=2,label='Approximation')
#dynamics2.plot(timepw,ypw[:,0:n],'g--')
dynamics2.set_xlim([0,15.])
dynamics2.set_xlabel('Time')
dynamics2.set_ylabel('Exc.Pop. Current')
#dynamics2.legend(('Complete network','Approximation'),'upper left')
plt.show()
| gpl-2.0 |
SpatialMetabolomics/SM_standalone | simple_webserver.py | 1 | 10185 | # for PNG generation
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import re
from colourmaps import viridis_colormap
from pipelines import inMemoryIMS_low_mem
from collections import OrderedDict
# webserver
import bottle
# isotope pattern generation
from pyMS.pyisocalc import pyisocalc
from pyIMS.ion_datacube import ion_datacube
from pyImagingMSpec.image_measures import *
from cpyImagingMSpec import ImzbReader
def get_datacube(reader, mzs, ppm):
cube = ion_datacube()
cube.xic = []
cube.nRows = reader.height
cube.nColumns = reader.width
cube.pixel_indices = None
for mz in mzs:
img = reader.get_mz_image(mz, ppm)
if cube.pixel_indices is None:
cube.pixel_indices = np.where(img.ravel() >= 0)[0]
img = img.ravel()[cube.pixel_indices]
img[img < 0] = 0.0
cube.xic.append(img)
return cube
class ImageWebserver(bottle.Bottle):
def __init__(self, *args, **kwargs):
super(ImageWebserver, self).__init__(*args, **kwargs)
def run(self, filenames, **kwargs):
self.load_data(filenames)
print "running webserver..."
super(ImageWebserver, self).run(**kwargs)
def load_data(self, filenames):
def prettify_fn(fn):
import os
return os.path.splitext(os.path.basename(fn))[0]
if len(filenames) == 0:
print "usage: python simple_webserver.py <file.imzML>"
print " python simple_webserver.py <file.hdf5>"
print " python simple_webserver.py <file1.imzb> [<file2.imzb> ...]"
sys.exit(0)
if len(filenames) > 1 and not all(fn.endswith(".imzb") for fn in filenames):
print "multiple-file mode is supported only for .imzb files"
sys.exit(2)
if len(filenames) == 1 and not filenames[0].endswith(".imzb"):
filename = filenames[0]
if filename.endswith(".imzML") or filename.endswith(".hdf5"):
print "loading data..."
self.data = inMemoryIMS_low_mem(filename)
self.in_memory = True
self.paths = { prettify_fn(filename) : filename }
else:
print "unsupported format"
sys.exit(3)
else:
self.paths = OrderedDict()
for fn in filenames:
if os.path.exists(fn):
self.paths[prettify_fn(fn)] = ImzbReader(fn)
else:
print "WARNING: file " + fn + " doesn't exist, skipping"
self.in_memory = False
def get_ion_image(self, dataset, mz, tol):
if self.in_memory is True:
return self.data.get_ion_image(np.array([mz]), tol).xic_to_image(0)
else:
return self.paths[dataset].get_mz_image(mz, tol)
def get_datacube(self, dataset, mzs, tol):
if self.in_memory is True:
return self.data.get_ion_image(np.array(mzs), tol)
else:
return get_datacube(self.paths[dataset], mzs, tol)
app = ImageWebserver()
@app.route('/', method='GET')
def show_form():
return bottle.template('show_images', hs_removal=True, selected_dataset=app.paths.iterkeys().next(),
isotope_patterns={}, formula="", selected_adduct='H', pretty_formula="", tol=5,
resolution=100000, npeaks=4, datasets=app.paths.keys())
import io
import os
import numpy as np
from matplotlib.colors import Normalize
cmap = viridis_colormap()
@app.route("/show_image/<dataset>/<mz>/<tol>")
def generate_image(dataset, mz, tol):
mz, tol = float(mz), float(tol)
img = app.get_ion_image(dataset, mz, tol)
if img.shape[0] > img.shape[1]:
img = img.T
buf = io.BytesIO()
mask = img >= 0
if bottle.request.query.remove_hotspots:
pc = np.percentile(img[mask], 99)
img[img > pc] = pc
values = img[mask]
norm = Normalize(vmin=np.min(values), vmax=np.max(values))
colorized_img = np.zeros((img.shape[0], img.shape[1], 4))
colorized_img[mask] = cmap(norm(values))
# set alpha channel to 0 for pixels with no data
colorized_img[img < 0, -1] = 0
plt.imsave(buf, colorized_img, format='png')
bottle.response.content_type = 'image/png'
buf.seek(0, os.SEEK_END)
bottle.response.content_length = buf.tell()
buf.seek(0)
return buf
@app.route("/correlation_plot/<dataset>/<formula>/<adduct>/<mzs>/<intensities>/<tol>")
def generate_correlation_plot(dataset, formula, adduct, mzs, intensities, tol):
mzs = np.array(map(float, mzs.split(",")))
intensities = np.array(map(float, intensities.split(",")))
order = intensities.argsort()[::-1]
mzs = mzs[order]
intensities = intensities[order]
tol = float(tol)
datacube = app.get_datacube(dataset, np.array(mzs), tol)
images = datacube.xic
buf = io.BytesIO()
transform = np.sqrt
base_intensities = images[0]
plt.figure(figsize=(16, 8))
ax1 = plt.subplot(1, 2, 1)
plt.title("per-pixel isotope pattern agreement (higher is better)")
n = min(len(datacube.xic), len(intensities))
full_images = np.array([transform(datacube.xic_to_image(i)) for i in xrange(n)])
full_images /= np.linalg.norm(full_images, ord=2, axis=0)
normalized_ints = transform(intensities[:n])
normalized_ints /= np.linalg.norm(normalized_ints)
#correlations = np.einsum("ijk,i", full_images, normalized_ints)
#plt.imshow(correlations, vmin=0, vmax=1)
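# Per-pixel agreement score for the line below: 1 minus the largest absolute deviation
# between the pixel's L2-normalized (sqrt-transformed) isotope intensities and the
# normalized theoretical pattern; 1 means perfect agreement, lower means worse.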
deviations = 1 - np.amax(np.abs(np.transpose(full_images, (1, 2, 0)) - normalized_ints), axis=2)
if deviations.shape[0] > deviations.shape[1]:
deviations = deviations.T
plt.imshow(deviations, vmin=0, vmax=1, cmap="gnuplot", interpolation='none')
plt.axis('off')
# http://stackoverflow.com/questions/26034777/matplotlib-same-height-for-colorbar-as-for-plot
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad="3%")
cbar = plt.colorbar(cax = cax1)
markersize = min(20, (10000.0 / (1 + np.sum(images[1] > 0))) ** 0.5)
plt.subplot(1, 2, 2)
plt.xlabel("sqrt( principal peak intensities )")
plt.ylabel("sqrt( other peak intensities )")
plt.title(formula + " + " + adduct + " (m/z={:.4f})".format(mzs[0]) +\
"\n(lines are based on the predicted isotope pattern)")
colors = ['blue', 'red', 'green', 'purple', 'black']
for i in xrange(1, min(5, len(images))):
ratio = intensities[i] / intensities[0]
observed = images[i]
mask = base_intensities > 0
label = "m/z={0:.4f} {1:.1%}".format(mzs[i], intensities[i] / 100.0)
plt.plot(transform(base_intensities[mask]), transform(observed[mask]), '.', markersize=markersize,
color = colors[i-1], label=label)
xs = transform(base_intensities[mask])
ys = transform(base_intensities[mask] * ratio)
order = xs.argsort()
plt.plot(xs[order], ys[order], color=colors[i-1], linewidth=0.5)
lgnd = plt.legend(loc='upper left', numpoints=10)
# http://stackoverflow.com/questions/24706125/setting-a-fixed-size-for-points-in-legend
for handle in lgnd.legendHandles:
handle._legmarker.set_markersize(6)
plt.tight_layout(w_pad=5.0)
plt.savefig(buf)
plt.close()
bottle.response.content_type = 'image/png'
buf.seek(0, os.SEEK_END)
bottle.response.content_length = buf.tell()
buf.seek(0)
return buf
@app.route("/show")
def show_images_get():
dataset = bottle.request.params.get('dataset', app.paths.iterkeys().next())
formula = bottle.request.params.get('formula', '')
tolerance = float(bottle.request.params.get('tolerance', 5.0))
resolution = float(bottle.request.params.get('resolution', 1e5))
selected_adduct = bottle.request.params.get('adduct', 'H')
hs_removal = bottle.request.GET.get('hs_removal', False)
k = int(bottle.request.params.get('npeaks', 4))
if hs_removal == 'on':
hs_removal = True
pts = float(bottle.request.params.get('pts', 10))
cutoff = float(bottle.request.params.get('pyisocalc_cutoff', 1e-3))
adducts = ['H', 'K', 'Na']
isotope_patterns = {}
for adduct in adducts:
sf = pyisocalc.SumFormulaParser.parse_string(formula + adduct)
raw_pattern = pyisocalc.isodist(sf, cutoff)
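# FWHM of the principal peak estimated from the resolving power definition
# R = m / delta_m, i.e. delta_m (FWHM) = m / resolution, using the first peak's m/z.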
fwhm = raw_pattern.get_spectrum()[0][0] / resolution
pattern = pyisocalc.apply_gaussian(raw_pattern, fwhm, pts, exact=True)
mzs, intensities = map(np.array, pattern.get_spectrum(source='centroids'))
if len(mzs) > k:
order = intensities.argsort()[::-1]
mzs = mzs[order][:k]
intensities = intensities[order][:k]
order = mzs.argsort()
mzs = mzs[order]
intensities = intensities[order]
datacube = app.get_datacube(dataset, mzs, tolerance)
if hs_removal:
for img in datacube.xic:
if len(img) > 0:
pc = np.percentile(img, 99)
img[img > pc] = pc
chaos = measure_of_chaos(datacube.xic_to_image(0), 30, overwrite=False)
iso_corr = isotope_pattern_match(datacube.xic, intensities)
img_corr = 1.0 # return 1 if there's a single peak
if len(intensities[1:]) > 1:
img_corr = isotope_image_correlation(datacube.xic, weights=intensities[1:])
stats = {'measure of chaos': chaos,
'image correlation score': img_corr,
'isotope pattern score': iso_corr}
isotope_patterns[adduct] = (mzs, intensities, stats)
return bottle.template('show_images', hs_removal=hs_removal,
isotope_patterns=isotope_patterns, formula=formula, selected_adduct=selected_adduct,
pretty_formula=re.sub(r"(\d+)", r"<sub>\1</sub>", formula),
resolution=resolution, tol=tolerance, datasets=app.paths.keys(),
npeaks=k, selected_dataset=dataset)
import sys
app.run(sys.argv[1:], port=8081)
| apache-2.0 |
npetrenko/recurrent_frcnn | measure_map.py | 1 | 9118 | import os
import cv2
import numpy as np
import sys
import pickle
from optparse import OptionParser
import time
from keras_frcnn import config
import keras_frcnn.resnet as nn
from keras import backend as K
from keras.layers import Input
from keras.models import Model
from keras_frcnn import roi_helpers
from keras_frcnn import data_generators
from sklearn.metrics import average_precision_score
def get_map(pred, gt, f):
T = {}
P = {}
fx, fy = f
for bbox in gt:
bbox['bbox_matched'] = False
pred_probs = np.array([s['prob'] for s in pred])
box_idx_sorted_by_prob = np.argsort(pred_probs)[::-1]
for box_idx in box_idx_sorted_by_prob:
pred_box = pred[box_idx]
pred_class = pred_box['class']
pred_x1 = pred_box['x1']
pred_x2 = pred_box['x2']
pred_y1 = pred_box['y1']
pred_y2 = pred_box['y2']
pred_prob = pred_box['prob']
if pred_class not in P:
P[pred_class] = []
T[pred_class] = []
P[pred_class].append(pred_prob)
found_match = False
for gt_box in gt:
gt_class = gt_box['class']
gt_x1 = gt_box['x1']/fx
gt_x2 = gt_box['x2']/fx
gt_y1 = gt_box['y1']/fy
gt_y2 = gt_box['y2']/fy
gt_seen = gt_box['bbox_matched']
if gt_class != pred_class:
continue
if gt_seen:
continue
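# PASCAL VOC-style matching: a prediction counts as a true positive if its IoU with an
# unmatched ground-truth box of the same class is at least 0.5.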
iou = data_generators.iou((pred_x1, pred_y1, pred_x2, pred_y2), (gt_x1, gt_y1, gt_x2, gt_y2))
if iou >= 0.5:
found_match = True
gt_box['bbox_matched'] = True
break
else:
continue
T[pred_class].append(int(found_match))
for gt_box in gt:
if not gt_box['bbox_matched'] and not gt_box['difficult']:
if gt_box['class'] not in P:
P[gt_box['class']] = []
T[gt_box['class']] = []
T[gt_box['class']].append(1)
P[gt_box['class']].append(0)
#import pdb
#pdb.set_trace()
return T, P
sys.setrecursionlimit(40000)
parser = OptionParser()
parser.add_option("-p", "--path", dest="test_path", help="Path to test data.")
parser.add_option("-n", "--num_rois", dest="num_rois",
help="Number of ROIs per iteration. Higher means more memory use.", default=32)
parser.add_option("--config_filename", dest="config_filename", help=
"Location to read the metadata related to the training (generated when training).",
default="config.pickle")
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple or pascal_voc",
default="pascal_voc"),
(options, args) = parser.parse_args()
if not options.test_path: # if filename is not given
parser.error('Error: path to test data must be specified. Pass --path to command line')
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
from keras_frcnn.simple_parser import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
config_output_filename = options.config_filename
with open(config_output_filename, 'r') as f_in:
C = pickle.load(f_in)
# turn off any data augmentation at test time
C.use_horizontal_flips = False
C.use_vertical_flips = False
C.rot_90 = False
img_path = options.test_path
def format_img(img, C):
img_min_side = float(C.im_size)
(height,width,_) = img.shape
if width <= height:
f = img_min_side/width
new_height = int(f * height)
new_width = int(img_min_side)
else:
f = img_min_side/height
new_width = int(f * width)
new_height = int(img_min_side)
fx = width/float(new_width)
fy = height/float(new_height)
img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
img = img[:, :, (2, 1, 0)]
img = img.astype(np.float32)
img[:, :, 0] -= C.img_channel_mean[0]
img[:, :, 1] -= C.img_channel_mean[1]
img[:, :, 2] -= C.img_channel_mean[2]
img /= C.img_scaling_factor
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
return img, fx, fy
class_mapping = C.class_mapping
if 'bg' not in class_mapping:
class_mapping['bg'] = len(class_mapping)
class_mapping = {v: k for k, v in class_mapping.iteritems()}
print(class_mapping)
class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
C.num_rois = int(options.num_rois)
if K.image_dim_ordering() == 'th':
input_shape_img = (3, None, None)
input_shape_features = (1024, None, None)
else:
input_shape_img = (None, None, 3)
input_shape_features = (None, None, 1024)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(C.num_rois, 4))
feature_map_input = Input(shape=input_shape_features)
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn_layers = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)
model_rpn = Model(img_input, rpn_layers)
model_classifier_only = Model([feature_map_input, roi_input], classifier)
model_classifier = Model([feature_map_input, roi_input], classifier)
model_rpn.load_weights(C.model_path, by_name=True)
model_classifier.load_weights(C.model_path, by_name=True)
model_rpn.compile(optimizer='sgd', loss='mse')
model_classifier.compile(optimizer='sgd', loss='mse')
all_imgs, _, _ = get_data(options.test_path)
test_imgs = [s for s in all_imgs if s['imageset'] == 'test']
T = {}
P = {}
for idx, img_data in enumerate(test_imgs):
print('{}/{}'.format(idx,len(test_imgs)))
st = time.time()
filepath = img_data['filepath']
img = cv2.imread(filepath)
X, fx, fy = format_img(img, C)
if K.image_dim_ordering() == 'tf':
X = np.transpose(X, (0, 2, 3, 1))
# get the feature maps and output from the RPN
[Y1, Y2, F] = model_rpn.predict(X)
R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7)
# convert from (x1,y1,x2,y2) to (x,y,w,h)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
# apply the spatial pyramid pooling to the proposed regions
bboxes = {}
probs = {}
for jk in range(R.shape[0] // C.num_rois + 1):
ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
if ROIs.shape[1] == 0:
break
if jk == R.shape[0] // C.num_rois:
# pad R
curr_shape = ROIs.shape
target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
ROIs_padded[:, :curr_shape[1], :] = ROIs
ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
ROIs = ROIs_padded
[P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
for ii in range(P_cls.shape[1]):
if np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
continue
cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
if cls_name not in bboxes:
bboxes[cls_name] = []
probs[cls_name] = []
(x, y, w, h) = ROIs[0, ii, :]
cls_num = np.argmax(P_cls[0, ii, :])
try:
(tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
tx /= C.classifier_regr_std[0]
ty /= C.classifier_regr_std[1]
tw /= C.classifier_regr_std[2]
th /= C.classifier_regr_std[3]
x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except:
raise
pass
bboxes[cls_name].append([16 * x, 16 * y, 16 * (x + w), 16 * (y + h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))
all_dets = []
for key in bboxes:
bbox = np.array(bboxes[key])
new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
for jk in range(new_boxes.shape[0]):
(x1, y1, x2, y2) = new_boxes[jk, :]
det = {'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': key, 'prob': new_probs[jk]}
all_dets.append(det)
print('Elapsed time = {}'.format(time.time() - st))
t, p = get_map(all_dets, img_data['bboxes'], (fx, fy))
for key in t.keys():
if key not in T:
T[key] = []
P[key] = []
T[key].extend(t[key])
P[key].extend(p[key])
all_aps = []
for key in T.keys():
ap = average_precision_score(T[key], P[key])
print('{} AP: {}'.format(key, ap))
all_aps.append(ap)
print('mAP = {}'.format(np.mean(np.array(all_aps))))
#print(T)
#print(P)
| apache-2.0 |
genialis/resolwe-bio | resolwe_bio/tools/star_sj_to_bed12.py | 1 | 3497 | #!/usr/bin/python3
"""Recalculate from STAR SJ.out.tab file to BED12 format."""
import argparse
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("sj_file", help="STAR SJ.out.tab output file")
args = parser.parse_args()
# STAR SJ.out.tab file consist of following columns:
# column 1: chromosome
# column 2: first base of the intron (1-based)
# column 3: last base of the intron (1-based)
# column 4: strand (0: undefined, 1: +, 2: -)
# column 5: intron motif: 0: non-canonical; 1: GT/AG, 2: CT/AC, 3: GC/AG, 4: CT/GC, 5: AT/AC, 6: GT/AT
# column 6: 0: unannotated, 1: annotated (only if splice junctions database is used)
# column 7: number of uniquely mapping reads crossing the junction
# column 8: number of multi-mapping reads crossing the junction
# column 9: maximum spliced alignment overhang
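# Example data line (hypothetical values, tab-separated):
# chr1  14830  14970  2  2  1  10  3  38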
sj_file = pd.read_csv(args.sj_file, delimiter="\t", header=None)
# BED12 consists of 12 columns:
header = [
"chromosome",
"sj_start",
"sj_end",
"sj_name",
"score",
"strand",
"thick_start",
"thick_end",
"item_rgb",
"block_counts",
"block_sizes",
"block_starts",
]
bed_file = pd.DataFrame(index=sj_file.index, columns=header)
# 1: chromosome = first column from STAR SJ.out.tab
bed_file.loc[:, "chromosome"] = sj_file.iloc[:, 0].values
# 2: SJ start (0-based) =
# (first base of the intron (1-based) - maximum spliced alignment overhang) -1 (to recalculate to
# 0 based system)
bed_file.loc[:, "sj_start"] = (sj_file.iloc[:, 1]) - (sj_file.iloc[:, 8]) - 1
# 3: SJ end (0-based) =
# (last base of the intron (1-based) + maximum spliced alignment overhang)
bed_file.loc[:, "sj_end"] = (sj_file.iloc[:, 2]) + (sj_file.iloc[:, 8])
# 4: SJ name
rows_num_length = len(str(len(sj_file.index)))
bed_file.loc[:, "sj_name"] = (
(sj_file.index + 1).astype(str).map(lambda x: "JUNC0" + x.zfill(rows_num_length))
)
# 5: score = number of uniquely and multi mapping reads crossing the junction
bed_file.loc[:, "score"] = sj_file.iloc[:, 6].values + sj_file.iloc[:, 7].values
# 6: strand = 0: '.' (undefined), 1: '+', 2: '-'
conditions = [sj_file.iloc[:, 3] == 0, sj_file.iloc[:, 3] == 1, sj_file.iloc[:, 3] == 2]
choices_strand = [".", "+", "-"]
bed_file.loc[:, "strand"] = np.select(conditions, choices_strand)
# 7: thick start is the same as SJ start
bed_file.loc[:, "thick_start"] = (sj_file.iloc[:, 1]) - (sj_file.iloc[:, 8]) - 1
# 8: thick end is the same as SJ end
bed_file.loc[:, "thick_end"] = (sj_file.iloc[:, 2]) + (sj_file.iloc[:, 8])
# 9: item RGB = 255,0,0 (red color) for '-' strand, 0,0,255 (blue color) for '+' strand
# and 0,0,0 (black) for undefined
choices_rgb = ["0,0,0", "0,0,255", "255,0,0"]
bed_file.loc[:, "item_rgb"] = np.select(conditions, choices_rgb)
# 10: block counts = 2
bed_file.loc[:, "block_counts"] = "2"
# 11: block sizes = maximum spliced alignment overhang, maximum spliced alignment overhang
bed_file.loc[:, "block_sizes"] = (
(sj_file.iloc[:, 8]).astype(str) + "," + (sj_file.iloc[:, 8]).astype(str)
)
# 12: block starts (a comma-separated list of block starts, relative to SJ start)
# = 0, (SJ end - SJ start + maximum spliced alignment overhang +1 )
bed_file.loc[:, "block_starts"] = (
"0"  # the first block always starts at SJ start
+ ","
+ ((sj_file.iloc[:, 2]) - (sj_file.iloc[:, 1]) + (sj_file.iloc[:, 8]) + 1).astype(
str
)
)
bed_file.to_csv("junctions_unsorted.bed", sep="\t", index=False, header=False)
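# Example invocation (hypothetical file path): ./star_sj_to_bed12.py SJ.out.tab
# The converted junctions are written to 'junctions_unsorted.bed' in the
# current working directory.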
| apache-2.0 |
Lawrence-Liu/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
tzk/EDeN | eden/display/graph_layout.py | 2 | 6323 | #!/usr/bin/env python
"""Provides layout in 2D of vector instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import networkx as nx
import numpy as np
from numpy.linalg import inv
from sklearn.base import BaseEstimator, ClassifierMixin
import logging
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class KKEmbedder(BaseEstimator, ClassifierMixin):
"""Given a graph computes 2D embedding.
Based on the algorithm from:
Tomihisa Kamada, and Satoru Kawai. "An algorithm for drawing general
undirected graphs." Information processing letters 31, no. 1 (1989): 7-15.
"""
def __init__(self, stop_eps=.1, n_iter=30,
init_pos=None):
"""Constructor."""
self.stop_eps = stop_eps
self.n_iter = n_iter
self.init_pos = init_pos
self.dms = []
def _compute_all_pairs(self, graph, weight=None, normalize=False):
lengths = nx.all_pairs_dijkstra_path_length(graph, weight=weight)
max_length = max([max(lengths[i].values()) for i in lengths])
if normalize:
for i in lengths:
for j in lengths[i]:
lengths[i][j] = float(lengths[i][j]) / max_length
return lengths
def _compute_initial_pos(self, graph):
_radius = 1
_offset = 0
n = len(graph)
pos = {id: np.array([_radius * math.cos(theta - math.pi / 2) + _offset,
_radius * math.sin(theta - math.pi / 2) + _offset]
)
for id, theta in enumerate(
np.linspace(0, 2 * math.pi * (1 - 1 / float(n)), num=n))}
return pos
def _compute_dE(self, pos=None, lengths=None, weights=None, m=None):
dEx = 0
dEy = 0
d2Ex2 = 0
d2Ey2 = 0
d2Exy = 0
d2Eyx = 0
for i in pos:
if i != m:
xmi = pos[m][0] - pos[i][0]
ymi = pos[m][1] - pos[i][1]
xmi2 = xmi * xmi
ymi2 = ymi * ymi
xmi_ymi2 = xmi2 + ymi2
lmi = lengths[m][i]
kmi = weights[m][i] / (lmi * lmi)
dEx += kmi * (xmi - (lmi * xmi) / math.sqrt(xmi_ymi2))
dEy += kmi * (ymi - (lmi * ymi) / math.sqrt(xmi_ymi2))
d2Ex2 += kmi * (1 - (lmi * ymi2) / math.pow(xmi_ymi2, 1.5))
d2Ey2 += kmi * (1 - (lmi * xmi2) / math.pow(xmi_ymi2, 1.5))
res = kmi * (lmi * xmi * ymi) / math.pow(xmi_ymi2, 1.5)
d2Exy += res
d2Eyx += res
return dEx, dEy, d2Ex2, d2Ey2, d2Exy, d2Eyx
def _compute_dm(self, pos=None, lengths=None, weights=None, m=None):
dEx = 0
dEy = 0
for i in pos:
if i != m:
xmi = pos[m][0] - pos[i][0]
ymi = pos[m][1] - pos[i][1]
xmi2 = xmi * xmi
ymi2 = ymi * ymi
xmi_ymi2 = xmi2 + ymi2
lmi = lengths[m][i]
kmi = weights[m][i] / (lmi * lmi)
dEx += kmi * (xmi - (lmi * xmi) / math.sqrt(xmi_ymi2))
dEy += kmi * (ymi - (lmi * ymi) / math.sqrt(xmi_ymi2))
return math.sqrt(dEx * dEx + dEy * dEy)
def _compute_m(self, pos=None, lengths=None, weights=None, id=0):
self.dms = np.array([self._compute_dm(pos, lengths, weights, m)
for m in pos])
m = np.argsort(-self.dms)[id]
return m
def _compute_dxdy(self, pos=None, lengths=None, weights=None, m=None):
dEx, dEy, d2Ex2, d2Ey2, d2Exy, d2Eyx = self._compute_dE(pos,
lengths,
weights,
m)
A = np.array([[d2Ex2, d2Exy], [d2Eyx, d2Ey2]])
B = np.array([[-dEx], [-dEy]])
X = inv(A).dot(B)
dx = X[0]
dy = X[1]
return dx, dy
def _update(self, pos=None, lengths=None, weights=None):
m = self._compute_m(pos, lengths, weights)
dx, dy = self._compute_dxdy(pos, lengths, weights, m)
pos[m][0] += dx
pos[m][1] += dy
return m
def _scale(self, init_pos):
_min = -0.5
_max = 0.5
pos = dict()
max_x = max([init_pos[id][0] for id in init_pos])
min_x = min([init_pos[id][0] for id in init_pos])
max_y = max([init_pos[id][1] for id in init_pos])
min_y = min([init_pos[id][1] for id in init_pos])
for id in init_pos:
x = init_pos[id][0]
y = init_pos[id][1]
# min-max scale to [0, 1]
x = (x - min_x) / (max_x - min_x)
y = (y - min_y) / (max_y - min_y)
# rescale
x = x * (_max - _min) + _min
y = y * (_max - _min) + _min
pos[id] = np.array([x, y])
return pos
def _compute_weights(self, graph):
weights = np.ones((len(graph), len(graph)))
for u, v in graph.edges():
val = graph.edge[u][v].get('weight', 1)
weights[u][v] = val
return weights
def transform(self, graph, normalize=True):
"""Transform."""
lengths = self._compute_all_pairs(graph, weight='len',
normalize=normalize)
weights = self._compute_weights(graph)
if self.init_pos is None:
pos = self._compute_initial_pos(graph)
else:
pos = self._scale(self.init_pos)
effective_n_iter = self.n_iter * len(graph)
for i in range(effective_n_iter):
m = self._update(pos, lengths, weights)
if i % 100 == 0:
logger.debug('iteration %d/%d score:%.2f threshold:%.2f' %
(i, effective_n_iter, self.dms[m], self.stop_eps))
if self.dms[m] < self.stop_eps or self.dms[m] != self.dms[m]:
logger.debug('Stopped at iteration %d/%d with score %.2f' %
(i, effective_n_iter, self.dms[m]))
break
return pos
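# Minimal usage sketch (not a doctest). It assumes an older networkx release
# where all_pairs_dijkstra_path_length returns a dict, and a graph whose nodes
# are labelled 0..n-1, as required by _compute_weights:
# >>> import networkx as nx
# >>> g = nx.path_graph(6)
# >>> pos = KKEmbedder(n_iter=10).transform(g)
# >>> nx.draw(g, pos=pos)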
| gpl-3.0 |
madan96/sympy | examples/advanced/autowrap_ufuncify.py | 45 | 2446 | #!/usr/bin/env python
"""
Setup ufuncs for the legendre polynomials
-----------------------------------------
This example demonstrates how you can use the ufuncify utility in SymPy
to create fast, customized universal functions for use with numpy
arrays. An autowrapped sympy expression can be significantly faster than
what you would get by applying a sequence of the ufuncs shipped with
numpy. [0]
You need to have numpy installed to run this example, as well as a
working fortran compiler.
[0]:
http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
"""
import sys
from sympy.external import import_module
np = import_module('numpy')
if not np:
sys.exit("Cannot import numpy. Exiting.")
plt = import_module('matplotlib.pyplot')
if not plt:
sys.exit("Cannot import matplotlib.pyplot. Exiting.")
import mpmath
from sympy.utilities.autowrap import ufuncify
from sympy.utilities.lambdify import implemented_function
from sympy import symbols, legendre, pprint
def main():
print(__doc__)
x = symbols('x')
# a numpy array we can apply the ufuncs to
grid = np.linspace(-1, 1, 1000)
# set mpmath precision to 20 significant numbers for verification
mpmath.mp.dps = 20
print("Compiling legendre ufuncs and checking results:")
# Let's also plot the ufunc's we generate
for n in range(6):
# Setup the SymPy expression to ufuncify
expr = legendre(n, x)
print("The polynomial of degree %i is" % n)
pprint(expr)
# This is where the magic happens:
binary_poly = ufuncify(x, expr)
# It's now ready for use with numpy arrays
polyvector = binary_poly(grid)
# let's check the values against mpmath's legendre function
maxdiff = 0
for j in range(len(grid)):
precise_val = mpmath.legendre(n, grid[j])
diff = abs(polyvector[j] - precise_val)
if diff > maxdiff:
maxdiff = diff
print("The largest error in applied ufunc was %e" % maxdiff)
assert maxdiff < 1e-14
# We can also attach the autowrapped legendre polynomial to a sympy
# function and plot values as they are calculated by the binary function
plt.plot(grid, polyvector)
print("Here's a plot with values calculated by the wrapped binary functions")
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
Sentient07/scikit-learn | sklearn/linear_model/randomized_l1.py | 6 | 24844 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..feature_selection.base import SelectorMixin
from ..utils import (as_float_array, check_random_state, check_X_y, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
SelectorMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def _get_support_mask(self):
"""Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected
for retention.
"""
check_is_fitted(self, 'scores_')
return self.scores_ > self.selection_threshold
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which corresponds to ``scaling`` here.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
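# Illustrative usage sketch (not a doctest; X, y stand for any regression
# dataset). transform() is provided by SelectorMixin and keeps the features
# whose scores_ exceed selection_threshold:
# >>> from sklearn.linear_model import RandomizedLasso
# >>> selector = RandomizedLasso(alpha=0.025, n_resampling=100).fit(X, y)
# >>> X_reduced = selector.transform(X)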
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
if C.ndim > 1:
raise ValueError("C should be 1-dimensional array-like, "
"but got a {}-dimensional array-like instead: {}."
.format(C.ndim, C))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float or array-like of shape [n_reg_parameter], optional, default=1
The regularization parameter C in the LogisticRegression.
When C is an array, fit will take each regularization parameter in C
one by one for LogisticRegression and store results for each one
in ``all_scores_``, where columns and rows represent corresponding
reg_parameters and features.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
features (See :ref:`User Guide <randomized_l1>` for details ).
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
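# Illustrative usage sketch (not a doctest; X, y stand for any classification
# dataset). As with RandomizedLasso, transform() keeps the stable features:
# >>> from sklearn.linear_model import RandomizedLogisticRegression
# >>> selector = RandomizedLogisticRegression(C=1.0, n_resampling=100).fit(X, y)
# >>> X_reduced = selector.transform(X)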
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
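# Illustrative usage sketch (not a doctest), on a small synthetic regression
# problem; alphas_grid holds alpha/alpha_max values and scores_path the
# per-feature selection frequencies along the path:
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(50, 10); y = X[:, 0] + 0.1 * rng.randn(50)
# >>> alphas_grid, scores_path = lasso_stability_path(X, y, n_resampling=50)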
| bsd-3-clause |
CVML/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
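# For example (hypothetical values): a single query with
# dist = np.array([[0.0, 2.0]]) yields array([[1., 0.]]), i.e. the
# zero-distance neighbor gets weight 1 and every other neighbor weight 0.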
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
francisco-dlp/hyperspy | hyperspy/misc/eels/tools.py | 4 | 10889 | import math
import numbers
import logging
import numpy as np
import matplotlib.pyplot as plt
from scipy import constants
from hyperspy.misc.array_tools import rebin
import hyperspy.defaults_parser
_logger = logging.getLogger(__name__)
def _estimate_gain(ns, cs,
weighted=False,
higher_than=None,
plot_results=False,
binning=0,
pol_order=1):
if binning > 0:
factor = 2 ** binning
remainder = np.mod(ns.shape[1], factor)
if remainder != 0:
ns = ns[:, remainder:]
cs = cs[:, remainder:]
new_shape = (ns.shape[0], ns.shape[1] // factor)  # integer shape for rebin
ns = rebin(ns, new_shape)
cs = rebin(cs, new_shape)
noise = ns - cs
variance = np.var(noise, 0)
average = np.mean(cs, 0).squeeze()
# Select only the values higher_than for the calculation
if higher_than is not None:
sorting_index_array = np.argsort(average)
average_sorted = average[sorting_index_array]
average_higher_than = average_sorted > higher_than
variance_sorted = variance.squeeze()[sorting_index_array]
variance2fit = variance_sorted[average_higher_than]
average2fit = average_sorted[average_higher_than]
else:
variance2fit = variance
average2fit = average
fit = np.polyfit(average2fit, variance2fit, pol_order)
if weighted is True:
from hyperspy._signals.signal1D import Signal1D
from hyperspy.models.model1d import Model1D
from hyperspy.components1d import Line
s = Signal1D(variance2fit)
s.axes_manager.signal_axes[0].axis = average2fit
m = Model1D(s)
l = Line()
l.a.value = fit[1]
l.b.value = fit[0]
m.append(l)
m.fit(weights=True)
fit[0] = l.b.value
fit[1] = l.a.value
if plot_results is True:
plt.figure()
plt.scatter(average.squeeze(), variance.squeeze())
plt.xlabel('Counts')
plt.ylabel('Variance')
plt.plot(average2fit, np.polyval(fit, average2fit), color='red')
results = {'fit': fit, 'variance': variance.squeeze(),
'counts': average.squeeze()}
return results
def _estimate_correlation_factor(g0, gk, k):
a = math.sqrt(g0 / gk)
e = k * (a - 1) / (a - k)
c = (1 - e) ** 2
return c
def estimate_variance_parameters(
noisy_signal,
clean_signal,
mask=None,
pol_order=1,
higher_than=None,
return_results=False,
plot_results=True,
weighted=False,
store_results="ask"):
"""Find the scale and offset of the Poissonian noise
By comparing an SI with its denoised version (i.e. by PCA),
this plots an estimation of the variance as a function of the number
of counts and fits a polynomial to the result.
Parameters
----------
noisy_signal, clean_signal : signal1D.Signal1D instances
mask : numpy bool array
To define the channels that will be used in the calculation.
pol_order : int
The order of the polynomial.
higher_than: float
To restrict the fit to counts over the given value.
return_results : Bool
plot_results : Bool
store_results: {True, False, "ask"}, default "ask"
If True, it stores the result in the signal metadata
Returns
-------
Dictionary with the result of a linear fit to estimate the offset
and scale factor
"""
with noisy_signal.unfolded(), clean_signal.unfolded():
# The rest of the code assumes that the first data axis
# is the navigation axis. We transpose the data if that is not the
# case.
ns = (noisy_signal.data.copy()
if noisy_signal.axes_manager[0].index_in_array == 0
else noisy_signal.data.T.copy())
cs = (clean_signal.data.copy()
if clean_signal.axes_manager[0].index_in_array == 0
else clean_signal.data.T.copy())
if mask is not None:
_slice = [slice(None), ] * len(ns.shape)
_slice[noisy_signal.axes_manager.signal_axes[0].index_in_array]\
= ~mask
ns = ns[_slice]
cs = cs[_slice]
results0 = _estimate_gain(
ns, cs, weighted=weighted, higher_than=higher_than,
plot_results=plot_results, binning=0, pol_order=pol_order)
results2 = _estimate_gain(
ns, cs, weighted=weighted, higher_than=higher_than,
plot_results=False, binning=2, pol_order=pol_order)
c = _estimate_correlation_factor(results0['fit'][0],
results2['fit'][0], 4)
message = ("Gain factor: %.2f\n" % results0['fit'][0] +
"Gain offset: %.2f\n" % results0['fit'][1] +
"Correlation factor: %.2f\n" % c)
        if store_results == "ask":
            is_ok = ""
            while is_ok not in ("Yes", "No"):
is_ok = input(
message +
"Would you like to store the results (Yes/No)?")
is_ok = is_ok == "Yes"
else:
is_ok = store_results
_logger.info(message)
if is_ok:
noisy_signal.metadata.set_item(
"Signal.Noise_properties.Variance_linear_model.gain_factor",
results0['fit'][0])
noisy_signal.metadata.set_item(
"Signal.Noise_properties.Variance_linear_model.gain_offset",
results0['fit'][1])
noisy_signal.metadata.set_item(
"Signal.Noise_properties.Variance_linear_model."
"correlation_factor",
c)
noisy_signal.metadata.set_item(
"Signal.Noise_properties.Variance_linear_model." +
"parameters_estimation_method",
'HyperSpy')
if return_results is True:
return results0
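# Illustrative usage sketch, not part of the original module. It assumes a
# HyperSpy signal `s` with noise following the linear variance model and uses
# HyperSpy's `decomposition`/`get_decomposition_model` API to build the
# denoised ("clean") estimate; the number of components is an arbitrary
# choice that should be read off a scree plot in practice.
def _example_estimate_variance_parameters(s, n_components=5):
    s.decomposition()
    s_clean = s.get_decomposition_model(n_components)
    return estimate_variance_parameters(
        noisy_signal=s, clean_signal=s_clean, pol_order=1,
        store_results=False, return_results=True, plot_results=False)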
def power_law_perc_area(E1, E2, r):
a = E1
b = E2
return 100 * ((a ** r * r - a ** r) * (a / (a ** r * r - a ** r) -
(b + a) / ((b + a) ** r * r - (b + a) ** r))) / a
def rel_std_of_fraction(a, std_a, b, std_b, corr_factor=1):
rel_a = std_a / a
rel_b = std_b / b
return np.sqrt(rel_a ** 2 + rel_b ** 2 -
2 * rel_a * rel_b * corr_factor)
def ratio(edge_A, edge_B):
a = edge_A.intensity.value
std_a = edge_A.intensity.std
b = edge_B.intensity.value
std_b = edge_B.intensity.std
ratio = a / b
ratio_std = ratio * rel_std_of_fraction(a, std_a, b, std_b)
_logger.info("Ratio %s/%s %1.3f +- %1.3f ",
edge_A.name,
edge_B.name,
a / b,
1.96 * ratio_std)
return ratio, ratio_std
def eels_constant(s, zlp, t):
r"""Calculate the constant of proportionality (k) in the relationship
between the EELS signal and the dielectric function.
dielectric function from a single scattering distribution (SSD) using
the Kramers-Kronig relations.
.. math::
S(E)=\frac{I_{0}t}{\pi a_{0}m_{0}v^{2}}\ln\left[1+\left(\frac{\beta}
{\theta_{E}}\right)^{2}\right]\Im(\frac{-1}{\epsilon(E)})=
k\Im(\frac{-1}{\epsilon(E)})
Parameters
----------
zlp: {number, BaseSignal}
        If the ZLP is the same for all spectra, the integral of the ZLP
can be provided as a number. Otherwise, if the ZLP intensity is not
the same for all spectra, it can be provided as i) a Signal
of the same dimensions as the current signal containing the ZLP
spectra for each location ii) a Signal of signal dimension 0
and navigation_dimension equal to the current signal containing the
integrated ZLP intensity.
t: {None, number, BaseSignal}
The sample thickness in nm. If the thickness is the same for all
spectra it can be given by a number. Otherwise, it can be provided
as a Signal with signal dimension 0 and navigation_dimension equal
to the current signal.
Returns
-------
k: Signal instance
"""
# Constants and units
me = constants.value(
'electron mass energy equivalent in MeV') * 1e3 # keV
# Mapped parameters
try:
e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
except BaseException:
raise AttributeError("Please define the beam energy."
"You can do this e.g. by using the "
"set_microscope_parameters method")
try:
beta = s.metadata.Acquisition_instrument.\
TEM.Detector.EELS.collection_angle
except BaseException:
raise AttributeError("Please define the collection semi-angle."
"You can do this e.g. by using the "
"set_microscope_parameters method")
axis = s.axes_manager.signal_axes[0]
eaxis = axis.axis.copy()
if eaxis[0] == 0:
# Avoid singularity at E=0
eaxis[0] = 1e-10
if isinstance(zlp, hyperspy.signal.BaseSignal):
if (zlp.axes_manager.navigation_dimension ==
s.axes_manager.navigation_dimension):
if zlp.axes_manager.signal_dimension == 0:
i0 = zlp.data
else:
i0 = zlp.integrate1D(axis.index_in_axes_manager).data
else:
raise ValueError('The ZLP signal dimensions are not '
'compatible with the dimensions of the '
'low-loss signal')
# The following prevents errors if the signal is a single spectrum
if len(i0) != 1:
i0 = i0.reshape(
np.insert(i0.shape, axis.index_in_array, 1))
elif isinstance(zlp, numbers.Number):
i0 = zlp
else:
        raise ValueError('The zero-loss peak input is not valid, it must be '
                         'in the BaseSignal class or a Number.')
if isinstance(t, hyperspy.signal.BaseSignal):
if (t.axes_manager.navigation_dimension ==
s.axes_manager.navigation_dimension) and (
t.axes_manager.signal_dimension == 0):
t = t.data
t = t.reshape(
np.insert(t.shape, axis.index_in_array, 1))
else:
raise ValueError('The thickness signal dimensions are not '
'compatible with the dimensions of the '
'low-loss signal')
# Kinetic definitions
ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2
tgt = e0 * (2 * me + e0) / (me + e0)
k = s.__class__(
data=(t * i0 / (332.5 * ke)) * np.log(1 + (beta * tgt / eaxis) ** 2))
k.metadata.General.title = "EELS proportionality constant K"
return k
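# Illustrative usage sketch, not part of the original module. `s_lowloss` is
# assumed to be an EELS low-loss spectrum whose beam energy and collection
# semi-angle were already set (e.g. with set_microscope_parameters); the
# integrated ZLP intensity and the thickness in nm are passed as plain numbers.
def _example_eels_constant(s_lowloss, i0=1e6, thickness_nm=50.):
    return eels_constant(s_lowloss, zlp=i0, t=thickness_nm)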
| gpl-3.0 |
kastnerkyle/crikey | kdllib.py | 1 | 180433 | # License: BSD 3-clause
# Authors: Kyle Kastner
from __future__ import print_function
import numpy as np
import uuid
from numpy.lib.stride_tricks import as_strided
from scipy import linalg, fftpack
from scipy.cluster.vq import kmeans, vq
from scipy.io import wavfile
import scipy.signal as sg
import shutil
from collections import Iterable, defaultdict
import socket
import wave
import tarfile
import zipfile
import os
import glob
import re
import copy
from collections import Counter
import time
import sys
import inspect
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import cPickle as pickle
except ImportError:
import pickle
import itertools
try:
import Queue
except ImportError:
import queue as Queue
import threading
import theano
import theano.tensor as tensor
from theano.tensor import nnet
from theano.tensor.nnet.abstract_conv import conv2d_grad_wrt_inputs
# Doesn't support binomial with n > 1??
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.scan_module.scan_utils import infer_shape
from theano.gof.fg import MissingInputError
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib2 as urllib
import locale
import logging
sys.setrecursionlimit(40000)
"""
init logging
"""
logging.basicConfig(level=logging.INFO,
format='%(message)s')
logger = logging.getLogger(__name__)
string_f = StringIO()
ch = logging.StreamHandler(string_f)
# Automatically put the HTML break characters on there
formatter = logging.Formatter('%(message)s<br>')
ch.setFormatter(formatter)
logger.addHandler(ch)
"""
end logging
"""
"""
begin decorators
"""
def coroutine(func):
def start(*args,**kwargs):
cr = func(*args,**kwargs)
        next(cr)
return cr
return start
"""
end decorators
"""
"""
begin metautils
"""
def get_generic_name():
# may need to make this a search for the first non-kdllib reference
# make generic name from highest calling context
prev_function_name = None
for i in range(len(inspect.stack())):
(frame, filename, line_number,
function_name, lines, index) = inspect.stack()[i]
#print(frame, filename, line_number, function_name, lines, index)
# Use stack to get easier function name than parsing the code itself
if i > 0:
_, _, _, prev_function_name, _, _ = inspect.stack()[i - 1]
else:
prev_function_name = function_name
script_name = filename.split(os.sep)[-1]
lib_location = os.path.realpath(__file__)
lib_name = lib_location.split(os.sep)[-1]
# cover .py and .pyc
if script_name != lib_name and script_name != lib_name[:-1]:
name = script_name + "_" + prev_function_name
#print(frame, filename, line_number, function_name, lines, index)
return name
raise ValueError("Issue in generic name getter")
# Many of these from Ishaan G.
def print_param_info(params):
"""Print information about the parameters in the given param set."""
params = sorted(params, key=lambda p: p.name)
values = [p.get_value(borrow=True) for p in params]
shapes = [p.shape for p in values]
logger.info("Params:")
for param, value, shape in zip(params, values, shapes):
logger.info("\t%s (%s)" % (param.name, ",".join([str(x) for x in shape])))
total_param_count = 0
for shape in shapes:
param_count = 1
for dim in shape:
param_count *= dim
total_param_count += param_count
logger.info("Total parameter count: %f M" % (total_param_count / float(1E6)))
def flatten(seq):
l = []
for elt in seq:
t = type(elt)
if t is tuple or t is list:
for elt2 in flatten(elt):
l.append(elt2)
else:
l.append(elt)
return l
_params = {}
def param(name=None, *args, **kwargs):
"""
A wrapper for `theano.shared` which enables parameter sharing in models.
Creates and returns theano shared variables similarly to `theano.shared`,
except if you try to create a param with the same name as a
previously-created one, `param(...)` will just return the old one instead of
making a new one.
This constructor also adds a `param` attribute to the shared variables it
creates, so that you can easily search a graph for all params.
"""
had_ext = False
if type(name) is tuple or type(name) is list:
name = flatten(name)
name_ext = name[1:]
name = name[0]
had_ext = True
if name is None:
name = get_generic_name()
name = name + "_1"
sub = name.split("_")
# skip purely numbers
ss = []
for s in sub:
try:
int(s)
except:
ss.append(s)
sub = ss
def key_fn(k):
tt = k.split("_")[:len(sub)]
return tt
matches = [k for k in _params.keys()
if key_fn(k) == sub]
if len(matches) > 0:
num = len(matches) + 1
name = "_".join(sub) + "_%i" % num
# Ability to append info
if had_ext:
ns = name.split("_")
name = "_".join(ns[:-1] + name_ext + [ns[-1]])
kwargs['name'] = name
param = as_shared(*args, **kwargs)
param.param = True
param.name = name
_params[name] = param
return _params[name]
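# Illustrative sketch, not part of the original library. It assumes the
# module-level `as_shared` helper (defined elsewhere in this file) behaves
# like `theano.shared`. Note that with this implementation a second call
# using an existing base name gets an auto-incremented suffix rather than
# colliding with the stored parameter.
def _example_param_naming():
    a = param("layer_W_1", np.zeros((2, 2), dtype="float32"))
    b = param("layer_W_1", np.zeros((2, 2), dtype="float32"))
    assert a.name != b.name  # e.g. "layer_W_1" and "layer_W_2" on a fresh registry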
def delete_params(name):
to_delete = [p_name for p_name in _params if name in p_name]
for p_name in to_delete:
del _params[p_name]
def param_search(node, critereon):
"""
Traverse the Theano graph starting at `node` and return a list of all nodes
which match the `critereon` function. When optimizing a cost function, you
can use this to get a list of all of the trainable params in the graph, like
so:
`lib.search(cost, lambda x: hasattr(x, "param"))`
"""
def _search(node, critereon, visited):
if node in visited:
return []
visited.add(node)
results = []
if isinstance(node, tensor.Apply):
for inp in node.inputs:
results += _search(inp, critereon, visited)
else: # Variable node
if critereon(node):
results.append(node)
if node.owner is not None:
results += _search(node.owner, critereon, visited)
return results
return _search(node, critereon, set())
def save_params(path):
raise ValueError("fix it, unify with save coroutine")
param_vals = {}
for name, param in _params.iteritems():
param_vals[name] = param.get_value()
with open(path, 'wb') as f:
pickle.dump(param_vals, f)
def load_params(path):
raise ValueError("fix it, unify")
with open(path, 'rb') as f:
param_vals = pickle.load(f)
for name, val in param_vals.iteritems():
_params[name].set_value(val)
def clear_all_params():
to_delete = [p_name for p_name in _params]
for p_name in to_delete:
del _params[p_name]
"""
end metautils
"""
"""
begin datasets
"""
def soundsc(X, copy=True):
"""
Approximate implementation of soundsc from MATLAB without the audio playing.
Parameters
----------
X : ndarray
Signal to be rescaled
copy : bool, optional (default=True)
Whether to make a copy of input signal or operate in place.
Returns
-------
    X_sc : ndarray
        X rescaled to 90% of the int16 range and cast to int16, suitable for
        writing with scipy.io.wavfile
"""
X = np.array(X, copy=copy)
X = (X - X.min()) / (X.max() - X.min())
X = 2 * X - 1
X = .9 * X
X = X * 2 ** 15
return X.astype('int16')
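# Illustrative sketch, not part of the original library: rescale a float
# signal with soundsc and write it to disk via scipy.io.wavfile. The 8000 Hz
# sample rate and output path are arbitrary assumptions.
def _example_soundsc_write(x, path="soundsc_example.wav", fs=8000):
    wavfile.write(path, fs, soundsc(x))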
def get_script_name():
script_path = os.path.abspath(sys.argv[0])
# Assume it ends with .py ...
script_name = script_path.split(os.sep)[-1]
return script_name
def get_resource_dir(name, resource_dir=None, folder=None, create_dir=True):
""" Get dataset directory path """
if not resource_dir:
resource_dir = os.getenv("KDLLIB_DIR", os.path.join(
os.path.expanduser("~"), "kdllib_dir"))
if folder is None:
resource_dir = os.path.join(resource_dir, name)
else:
resource_dir = os.path.join(resource_dir, folder)
if create_dir:
if not os.path.exists(resource_dir):
os.makedirs(resource_dir)
return resource_dir
def download(url, server_fname, local_fname=None, progress_update_percentage=5,
bypass_certificate_check=False):
"""
An internet download utility modified from
http://stackoverflow.com/questions/22676/
how-do-i-download-a-file-over-http-using-python/22776#22776
"""
if bypass_certificate_check:
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
u = urllib.urlopen(url, context=ctx)
else:
u = urllib.urlopen(url)
if local_fname is None:
local_fname = server_fname
full_path = local_fname
meta = u.info()
with open(full_path, 'wb') as f:
try:
file_size = int(meta.get("Content-Length"))
except TypeError:
logger.info("WARNING: Cannot get file size, displaying bytes instead!")
file_size = 100
logger.info("Downloading: %s Bytes: %s" % (server_fname, file_size))
file_size_dl = 0
block_sz = int(1E7)
p = 0
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if (file_size_dl * 100. / file_size) > p:
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl *
100. / file_size)
logger.info(status)
p += progress_update_percentage
def midiwrap():
"""
Wrapper to midi read and midi write
"""
try:
sys.path.insert(1, get_resource_dir(""))
from midi.utils import midiread, midiwrite
sys.path.pop(1)
except ImportError:
logger.info("Need GPL licensed midi utils, downloading...",
"http://www.iro.umontreal.ca/~lisa/deep/midi.zip")
url = "http://www.iro.umontreal.ca/~lisa/deep/midi.zip"
partial_path = get_resource_dir("")
full_path = os.path.join(partial_path, "midi.zip")
if not os.path.exists(full_path):
download(url, full_path)
zip_ref = zipfile.ZipFile(full_path, 'r')
zip_ref.extractall(partial_path)
zip_ref.close()
sys.path.insert(1, get_resource_dir(""))
from midi.utils import midiread, midiwrite
sys.path.pop(1)
return midiread, midiwrite
class BlizzardThread(threading.Thread):
"""Blizzard Thread"""
def __init__(self, queue, out_queue, preproc_fn):
threading.Thread.__init__(self)
self.queue = queue
self.out_queue = out_queue
self.preproc_fn = preproc_fn
def run(self):
while True:
# Grabs image path from queue
wav_paths, texts = self.queue.get()
text_group = texts
wav_group = [wavfile.read(wp)[1] for wp in wav_paths]
wav_group = [w.astype('float32') / (2 ** 15) for w in wav_group]
wav_group = [self.preproc_fn(wi) for wi in wav_group]
self.out_queue.put((wav_group, text_group))
self.queue.task_done()
class Blizzard_dataset(object):
def __init__(self, minibatch_size=2,
blizzard_path='/home/kkastner/blizzard_data'):
self.n_fft = 256
self.n_step = self.n_fft // 4
self.blizzard_path = blizzard_path
# extracted text
self.text_path = os.path.join(self.blizzard_path, 'train', 'segmented',
'prompts.gui')
with open(self.text_path, 'r') as f:
tt = f.readlines()
wav_names = [t.strip() for t in tt[::3]]
raw_other = tt[2::3]
raw_text = [t.strip().lower() for t in tt[1::3]]
all_symbols = set()
for rt in raw_text:
all_symbols = set(list(all_symbols) + list(set(rt)))
self.wav_names = wav_names
self.text = raw_text
self.symbols = sorted(list(all_symbols))
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
raise ValueError()
# These files come from converting the Blizzard mp3 files to wav,
# then placing in a directory called blizzard_wav
self.wav_paths = glob.glob(os.path.join(self.blizzard_path,
'blizzard_wav', '*.wav'))
self.minibatch_size = minibatch_size
self._lens = np.array([float(len(t)) for t in self.text])
# Get only the smallest 50% of files for now
_cut = np.percentile(self._lens, 5)
_ind = np.where(self._lens <= _cut)[0]
self.text = [self.text[i] for i in _ind]
self.wav_names = [self.wav_names[i] for i in _ind]
assert len(self.text) == len(self.wav_names)
final_wav_paths = []
final_text = []
final_wav_names = []
for n, (w, t) in enumerate(zip(self.wav_names, self.text)):
parts = w.split("chp")
name = parts[0]
chapter = [pp for pp in parts[1].split("_") if pp != ''][0]
for p in self.wav_paths:
if name in p and chapter in p:
final_wav_paths.append(p)
final_wav_names.append(w)
final_text.append(t)
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
raise ValueError()
break
# resort into shortest -> longest order
sorted_inds = np.argsort([len(t) for t in final_text])
st = [final_text[i] for i in sorted_inds]
swp = [final_wav_paths[i] for i in sorted_inds]
swn = [final_wav_names[i] for i in sorted_inds]
self.wav_names = swn
self.wav_paths = swp
self.text = st
assert len(self.wav_names) == len(self.wav_paths)
assert len(self.wav_paths) == len(self.text)
self.n_per_epoch = len(self.wav_paths)
self.n_samples_seen_ = 0
self.buffer_size = 2
self.minibatch_size = minibatch_size
self.input_qsize = 5
self.min_input_qsize = 2
if len(self.wav_paths) % self.minibatch_size != 0:
logger.info("WARNING: Sample size not an even multiple of minibatch size")
logger.info("Truncating...")
self.wav_paths = self.wav_paths[:-(
len(self.wav_paths) % self.minibatch_size)]
self.text = self.text[:-(
len(self.text) % self.minibatch_size)]
assert len(self.wav_paths) % self.minibatch_size == 0
assert len(self.text) % self.minibatch_size == 0
self.grouped_wav_paths = zip(*[iter(self.wav_paths)] *
self.minibatch_size)
self.grouped_text = zip(*[iter(self.text)] *
self.minibatch_size)
assert len(self.grouped_wav_paths) == len(self.grouped_text)
self._init_queues()
def _init_queues(self):
# Infinite...
self.grouped_elements = itertools.cycle(zip(self.grouped_wav_paths,
self.grouped_text))
self.queue = Queue.Queue()
self.out_queue = Queue.Queue(maxsize=self.buffer_size)
for i in range(1):
self.it = BlizzardThread(self.queue, self.out_queue, self._pre)
self.it.setDaemon(True)
self.it.start()
# Populate queue with some paths to image data
for n, _ in enumerate(range(self.input_qsize)):
group = self.grouped_elements.next()
self.queue.put(group)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
return self._step()
def reset(self):
self.n_samples_seen_ = 0
self._init_queues()
def _step(self):
if self.n_samples_seen_ >= self.n_per_epoch:
self.reset()
raise StopIteration("End of epoch")
wav_group, text_group = self.out_queue.get()
self.n_samples_seen_ += self.minibatch_size
if self.queue.qsize() <= self.min_input_qsize:
for i in range(self.input_qsize):
group = self.grouped_elements.next()
self.queue.put(group)
return wav_group, text_group
def fetch_sample_audio_chords(n_samples=None):
url = "https://dl.dropboxusercontent.com/u/15378192/piano_chords.tar.gz"
partial_path = get_resource_dir("chords")
full_path = os.path.join(partial_path, "piano_chords.tar.gz")
if not os.path.exists(full_path):
download(url, full_path)
tf = tarfile.open(full_path)
wav_names = [fname for fname in tf.getnames()
if ".wav" in fname.split(os.sep)[-1]]
chords = []
logger.info("Loading audio files...")
for wav_name in wav_names[:n_samples]:
f = tf.extractfile(wav_name)
fs, d = wavfile.read(f)
d = d.astype('float32') / (2 ** 15)
chords.append(d)
return fs, chords, wav_names
def fetch_sample_speech_fruit(n_samples=None):
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
partial_path = get_resource_dir("fruit")
full_path = os.path.join(partial_path, "audio.tar.gz")
if not os.path.exists(full_path):
download(url, full_path)
tf = tarfile.open(full_path)
wav_names = [fname for fname in tf.getnames()
if ".wav" in fname.split(os.sep)[-1]]
speech = []
logger.info("Loading speech files...")
for wav_name in wav_names[:n_samples]:
f = tf.extractfile(wav_name)
fs, d = wavfile.read(f)
d = d.astype('float32') / (2 ** 15)
speech.append(d)
return fs, speech, wav_names
def fetch_sample_speech_tapestry():
url = "https://www.dropbox.com/s/qte66a7haqspq2g/tapestry.wav?dl=1"
wav_path = "tapestry.wav"
if not os.path.exists(wav_path):
download(url, wav_path)
fs, d = wavfile.read(wav_path)
d = d.astype('float32') / (2 ** 15)
# file is stereo? - just choose one channel
return fs, d
def _wav2array(nchannels, sampwidth, data):
# wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""data must be the string containing the bytes from the wav file."""
num_samples, remainder = divmod(len(data), sampwidth * nchannels)
if remainder > 0:
raise ValueError('The length of data is not a multiple of '
'sampwidth * num_channels.')
if sampwidth > 4:
raise ValueError("sampwidth must not be greater than 4.")
if sampwidth == 3:
a = np.empty((num_samples, nchannels, 4), dtype=np.uint8)
raw_bytes = np.fromstring(data, dtype=np.uint8)
a[:, :, :sampwidth] = raw_bytes.reshape(-1, nchannels, sampwidth)
a[:, :, sampwidth:] = (a[:, :, sampwidth - 1:sampwidth] >> 7) * 255
result = a.view('<i4').reshape(a.shape[:-1])
else:
# 8 bit samples are stored as unsigned ints; others as signed ints.
dt_char = 'u' if sampwidth == 1 else 'i'
a = np.fromstring(data, dtype='<%s%d' % (dt_char, sampwidth))
result = a.reshape(-1, nchannels)
return result
def readwav(file):
# wavio.py
# Author: Warren Weckesser
# License: BSD 3-Clause (http://opensource.org/licenses/BSD-3-Clause)
"""
Read a wav file.
Returns the frame rate, sample width (in bytes) and a numpy array
containing the data.
This function does not read compressed wav files.
"""
wav = wave.open(file)
rate = wav.getframerate()
nchannels = wav.getnchannels()
sampwidth = wav.getsampwidth()
nframes = wav.getnframes()
data = wav.readframes(nframes)
wav.close()
array = _wav2array(nchannels, sampwidth, data)
return rate, sampwidth, array
def fetch_sample_speech_ono(n_samples=None):
datapath = os.path.join("ono_wav", "*wav")
wav_names = glob.glob(datapath)
wav_names = [w for w in wav_names
if "EKENWAY" in w]
wav_names = [w for w in wav_names
if "PAIN" in w]
speech = []
logger.info("Loading speech files...")
for wav_name in wav_names[:n_samples]:
fs, bitw, d = readwav(wav_name)
# 24 bit but only 16 used???
d = d.astype('float32') / (2 ** 15)
d = sg.decimate(d, 6, ftype="fir")[::6]
# decimate to 8k
fs = 8000
speech.append(d)
return fs, speech, wav_names
def fetch_sample_speech_walla(n_samples=None):
datapath = os.path.join("walla_wav", "*wav")
names = glob.glob(datapath)
speech = []
wav_names = []
logger.info("Loading speech files...")
for name in names[:n_samples]:
fs, bitw, d = readwav(name)
d = d.astype('float32') / (2 ** 15)
inds = np.arange(0, len(d), 16000)
for i, j in zip(inds[:-1], inds[1:]):
dij = d[i:j]
dij = sg.decimate(dij, 2, ftype="iir")[::2]
# decimate to 8k
fs = 8000
speech.append(dij)
wav_names.append(name)
if len(speech) > 200:
break
return fs, speech, wav_names
def complex_to_real_view(arr_c):
# Inplace view from complex to r, i as separate columns
assert arr_c.dtype in [np.complex64, np.complex128]
shp = arr_c.shape
dtype = np.float64 if arr_c.dtype == np.complex128 else np.float32
arr_r = arr_c.ravel().view(dtype=dtype).reshape(shp[0], 2 * shp[1])
return arr_r
def real_to_complex_view(arr_r):
# Inplace view from real, image as columns to complex
assert arr_r.dtype not in [np.complex64, np.complex128]
shp = arr_r.shape
dtype = np.complex128 if arr_r.dtype == np.float64 else np.complex64
arr_c = arr_r.ravel().view(dtype=dtype).reshape(shp[0], shp[1] // 2)
return arr_c
def overlap(X, window_size, window_step):
"""
Create an overlapped version of X
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
window_step : int
Step size between windows
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
overlap_sz = window_size - window_step
new_shape = X.shape[:-1] + ((X.shape[-1] - overlap_sz) // window_step, window_size)
new_strides = X.strides[:-1] + (window_step * X.strides[-1],) + X.strides[-1:]
X_strided = as_strided(X, shape=new_shape, strides=new_strides)
return X_strided
def halfoverlap(X, window_size):
"""
Create an overlapped version of X using 50% of window_size as overlap.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError("Window size must be even!")
window_step = window_size // 2
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
def invert_halfoverlap(X_strided):
"""
Invert ``halfoverlap`` function to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
# Hardcoded 50% overlap! Can generalize later...
n_rows, n_cols = X_strided.shape
X = np.zeros((((int(n_rows // 2) + 1) * n_cols),)).astype(X_strided.dtype)
start_index = 0
end_index = n_cols
window_step = n_cols // 2
for row in range(X_strided.shape[0]):
X[start_index:end_index] += X_strided[row]
start_index += window_step
end_index += window_step
return X
def overlap_add(X_strided, window_step, wsola=False):
"""
overlap add to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
window_step : int
step size for overlap add
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
n_rows, window_size = X_strided.shape
# Start with largest size (no overlap) then truncate after we finish
# +2 for one window on each side
X = np.zeros(((n_rows + 2) * window_size,)).astype(X_strided.dtype)
start_index = 0
total_windowing_sum = np.zeros((X.shape[0]))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(window_size) / (
window_size - 1))
for i in range(n_rows):
end_index = start_index + window_size
if wsola:
offset_size = window_size - window_step
offset = xcorr_offset(X[start_index:start_index + offset_size],
X_strided[i, :offset_size])
ss = start_index - offset
st = end_index - offset
if start_index - offset < 0:
ss = 0
st = 0 + (end_index - start_index)
X[ss:st] += X_strided[i]
total_windowing_sum[ss:st] += win
start_index = start_index + window_step
else:
X[start_index:end_index] += X_strided[i]
total_windowing_sum[start_index:end_index] += win
start_index += window_step
# Not using this right now
#X = np.real(X) / (total_windowing_sum + 1)
X = X[:end_index]
return X
def stft(X, fftsize=128, step="half", mean_normalize=True, real=False,
compute_onesided=True):
"""
Compute STFT for 1D real valued input X
"""
if real:
local_fft = fftpack.rfft
cut = -1
else:
local_fft = fftpack.fft
cut = None
if compute_onesided:
cut = fftsize // 2
if mean_normalize:
X -= X.mean()
if step == "half":
X = halfoverlap(X, fftsize)
else:
X = overlap(X, fftsize, step)
size = fftsize
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
X = X * win[None]
X = local_fft(X)[:, :cut]
return X
def istft(X, fftsize=128, step="half", wsola=False, mean_normalize=True,
real=False, compute_onesided=True):
"""
Compute ISTFT for STFT transformed X
"""
if real:
local_ifft = fftpack.irfft
X_pad = np.zeros((X.shape[0], X.shape[1] + 1)) + 0j
X_pad[:, :-1] = X
X = X_pad
else:
local_ifft = fftpack.ifft
if compute_onesided:
X_pad = np.zeros((X.shape[0], 2 * X.shape[1])) + 0j
X_pad[:, :fftsize // 2] = X
X_pad[:, fftsize // 2:] = 0
X = X_pad
X = local_ifft(X).astype("float64")
if step == "half":
X = invert_halfoverlap(X)
else:
X = overlap_add(X, step, wsola=wsola)
if mean_normalize:
X -= np.mean(X)
return X
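# Illustrative sketch, not part of the original library: log-magnitude
# spectrogram of a synthetic test tone using the stft helper above. The tone
# frequency, sample rate and FFT size are arbitrary choices.
def _example_log_spectrogram(fs=8000, f0=440., n_fft=256):
    t = np.arange(fs) / float(fs)                 # one second of audio
    x = np.sin(2 * np.pi * f0 * t)
    X = stft(x, fftsize=n_fft, step="half", compute_onesided=True)
    return np.log10(np.abs(X) + 1E-9)             # shape (n_frames, n_fft // 2)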
def mdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
X = halfoverlap(X, N)
X = sine_window(X)
n, k = np.meshgrid(np.arange(N), np.arange(M))
# Use transpose due to "samples as rows" convention
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M).T
return np.dot(X, tf)
def imdct_slow(X, dctsize=128):
M = dctsize
N = 2 * dctsize
N_0 = (M + 1) / 2
N_4 = N / 4
n, k = np.meshgrid(np.arange(N), np.arange(M))
# inverse *is not* transposed
tf = np.cos(np.pi * (n + N_0) * (k + 0.5) / M)
X_r = np.dot(X, tf) / N_4
X_r = sine_window(X_r)
X = invert_halfoverlap(X_r)
return X
def herz_to_mel(freqs):
"""
Based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
f_0 = 0 # 133.33333
f_sp = 200 / 3. # 66.66667
bark_freq = 1000.
bark_pt = (bark_freq - f_0) / f_sp
# The magic 1.0711703 which is the ratio needed to get from 1000 Hz
# to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz
# and the preceding linear filter center at 933.33333 Hz
# (actually 1000/933.33333 = 1.07142857142857 and
# exp(log(6.4)/27) = 1.07117028749447)
if not isinstance(freqs, np.ndarray):
freqs = np.array(freqs)[None]
log_step = np.exp(np.log(6.4) / 27)
lin_pts = (freqs < bark_freq)
mel = 0. * freqs
mel[lin_pts] = (freqs[lin_pts] - f_0) / f_sp
mel[~lin_pts] = bark_pt + np.log(freqs[~lin_pts] / bark_freq) / np.log(
log_step)
return mel
def mel_to_herz(mel):
"""
Based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
f_0 = 0 # 133.33333
f_sp = 200 / 3. # 66.66667
bark_freq = 1000.
bark_pt = (bark_freq - f_0) / f_sp
# The magic 1.0711703 which is the ratio needed to get from 1000 Hz
# to 6400 Hz in 27 steps, and is *almost* the ratio between 1000 Hz
# and the preceding linear filter center at 933.33333 Hz
# (actually 1000/933.33333 = 1.07142857142857 and
# exp(log(6.4)/27) = 1.07117028749447)
if not isinstance(mel, np.ndarray):
mel = np.array(mel)[None]
log_step = np.exp(np.log(6.4) / 27)
lin_pts = (mel < bark_pt)
freqs = 0. * mel
freqs[lin_pts] = f_0 + f_sp * mel[lin_pts]
freqs[~lin_pts] = bark_freq * np.exp(np.log(log_step) * (
mel[~lin_pts] - bark_pt))
return freqs
def mel_freq_weights(n_fft, fs, n_filts=None, width=None):
"""
Based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
min_freq = 0
max_freq = fs // 2
if width is None:
width = 1.
if n_filts is None:
n_filts = int(herz_to_mel(max_freq) / 2) + 1
else:
n_filts = int(n_filts)
assert n_filts > 0
weights = np.zeros((n_filts, n_fft))
fft_freqs = np.arange(n_fft // 2) / n_fft * fs
min_mel = herz_to_mel(min_freq)
max_mel = herz_to_mel(max_freq)
partial = np.arange(n_filts + 2) / (n_filts + 1.) * (max_mel - min_mel)
bin_freqs = mel_to_herz(min_mel + partial)
bin_bin = np.round(bin_freqs / fs * (n_fft - 1))
for i in range(n_filts):
fs_i = bin_freqs[i + np.arange(3)]
fs_i = fs_i[1] + width * (fs_i - fs_i[1])
lo_slope = (fft_freqs - fs_i[0]) / float(fs_i[1] - fs_i[0])
hi_slope = (fs_i[2] - fft_freqs) / float(fs_i[2] - fs_i[1])
weights[i, :n_fft // 2] = np.maximum(
0, np.minimum(lo_slope, hi_slope))
# Constant amplitude multiplier
weights = np.diag(2. / (bin_freqs[2:n_filts + 2]
- bin_freqs[:n_filts])).dot(weights)
weights[:, n_fft // 2:] = 0
return weights
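# Illustrative sketch, not part of the original library: build a mel
# filterbank with mel_freq_weights and apply it to a magnitude spectrogram
# from the stft helper above. The sizes are arbitrary choices.
def _example_mel_spectrogram(x, fs=8000, n_fft=256, n_mels=40):
    weights = mel_freq_weights(n_fft, fs, n_filts=n_mels)            # (n_mels, n_fft)
    X_mag = np.abs(stft(x, fftsize=n_fft, compute_onesided=False))   # (n_frames, n_fft)
    return X_mag.dot(weights.T)                                      # (n_frames, n_mels)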
def time_attack_agc(X, fs, t_scale=0.5, f_scale=1.):
"""
AGC based on code by Dan Ellis
http://labrosa.ee.columbia.edu/matlab/tf_agc/
"""
# 32 ms grid for FFT
n_fft = 2 ** int(np.log(0.032 * fs) / np.log(2))
f_scale = float(f_scale)
window_size = n_fft
window_step = window_size // 2
X_freq = stft(X, window_size, mean_normalize=False)
fft_fs = fs / window_step
n_bands = max(10, 20 / f_scale)
mel_width = f_scale * n_bands / 10.
f_to_a = mel_freq_weights(n_fft, fs, n_bands, mel_width)
f_to_a = f_to_a[:, :n_fft // 2]
audiogram = np.abs(X_freq).dot(f_to_a.T)
fbg = np.zeros_like(audiogram)
state = np.zeros((audiogram.shape[1],))
alpha = np.exp(-(1. / fft_fs) / t_scale)
for i in range(len(audiogram)):
state = np.maximum(alpha * state, audiogram[i])
fbg[i] = state
sf_to_a = np.sum(f_to_a, axis=0)
E = np.diag(1. / (sf_to_a + (sf_to_a == 0)))
E = E.dot(f_to_a.T)
E = fbg.dot(E.T)
E[E <= 0] = np.min(E[E > 0])
ts = istft(X_freq / E, window_size, mean_normalize=False)
return ts, X_freq, E
def sine_window(X):
"""
Apply a sinusoid window to X.
Parameters
----------
X : ndarray, shape=(n_samples, n_features)
Input array of samples
Returns
-------
X_windowed : ndarray, shape=(n_samples, n_features)
Windowed version of X.
"""
i = np.arange(X.shape[1])
win = np.sin(np.pi * (i + 0.5) / X.shape[1])
row_stride = 0
col_stride = win.itemsize
strided_win = as_strided(win, shape=X.shape,
strides=(row_stride, col_stride))
return X * strided_win
def complex_to_abs(arr_c):
return np.abs(arr_c)
def complex_to_angle(arr_c):
return np.angle(arr_c)
def abs_and_angle_to_complex(arr_abs, arr_angle):
# abs(f_c2 - f_c) < 1E-15
return arr_abs * np.exp(1j * arr_angle)
def angle_to_sin_cos(arr_angle):
return np.hstack((np.sin(arr_angle), np.cos(arr_angle)))
def sin_cos_to_angle(arr_sin, arr_cos):
return np.arctan2(arr_sin, arr_cos)
def xcorr_offset(x1, x2):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
x1 = x1 - x1.mean()
x2 = x2 - x2.mean()
frame_size = len(x2)
half = frame_size // 2
corrs = np.convolve(x1.astype('float32'), x2[::-1].astype('float32'))
corrs[:half] = -1E30
corrs[-half:] = -1E30
offset = corrs.argmax() - len(x1)
return offset
def invert_spectrogram(X_s, step, calculate_offset=True, set_zero_phase=True):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
size = int(X_s.shape[1] // 2)
wave = np.zeros((X_s.shape[0] * step + size))
# Getting overflow warnings with 32 bit...
wave = wave.astype('float64')
total_windowing_sum = np.zeros((X_s.shape[0] * step + size))
win = 0.54 - .46 * np.cos(2 * np.pi * np.arange(size) / (size - 1))
est_start = int(size // 2) - 1
est_end = est_start + size
for i in range(X_s.shape[0]):
wave_start = int(step * i)
wave_end = wave_start + size
if set_zero_phase:
spectral_slice = X_s[i].real + 0j
else:
# already complex
spectral_slice = X_s[i]
# Don't need fftshift due to different impl.
wave_est = np.real(np.fft.ifft(spectral_slice))[::-1]
if calculate_offset and i > 0:
offset_size = size - step
if offset_size <= 0:
print("WARNING: Large step size >50\% detected! "
"This code works best with high overlap - try "
"with 75% or greater")
offset_size = step
offset = xcorr_offset(wave[wave_start:wave_start + offset_size],
wave_est[est_start:est_start + offset_size])
else:
offset = 0
wave[wave_start:wave_end] += win * wave_est[
est_start - offset:est_end - offset]
total_windowing_sum[wave_start:wave_end] += win
wave = np.real(wave) / (total_windowing_sum + 1E-6)
return wave
def iterate_invert_spectrogram(X_s, fftsize, step, n_iter=10, verbose=False,
complex_input=False):
"""
Under MSR-LA License
Based on MATLAB implementation from Spectrogram Inversion Toolbox
References
----------
D. Griffin and J. Lim. Signal estimation from modified
short-time Fourier transform. IEEE Trans. Acoust. Speech
Signal Process., 32(2):236-243, 1984.
Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory
Model Inversion for Sound Separation. Proc. IEEE-ICASSP,
Adelaide, 1994, II.77-80.
Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal
Estimation from Modified Short-Time Fourier Transform
Magnitude Spectra. IEEE Transactions on Audio Speech and
Language Processing, 08/2007.
"""
reg = np.max(X_s) / 1E8
X_best = copy.deepcopy(X_s)
for i in range(n_iter):
if verbose:
print("Runnning iter %i" % i)
if i == 0 and not complex_input:
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=True)
else:
# Calculate offset was False in the MATLAB version
# but in mine it massively improves the result
# Possible bug in my impl?
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
est = stft(X_t, fftsize=fftsize, step=step, compute_onesided=False)
phase = est / np.maximum(reg, np.abs(est))
phase = phase[:len(X_s)]
X_s = X_s[:len(phase)]
X_best = X_s * phase
X_t = invert_spectrogram(X_best, step, calculate_offset=True,
set_zero_phase=False)
return np.real(X_t)
def voiced_unvoiced(X, window_size=256, window_step=128, copy=True):
"""
Voiced unvoiced detection from a raw signal
Based on code from:
https://www.clear.rice.edu/elec532/PROJECTS96/lpc/code.html
Other references:
http://www.seas.ucla.edu/spapl/code/harmfreq_MOLRT_VAD.m
Parameters
----------
X : ndarray
Raw input signal
window_size : int, optional (default=256)
The window size to use, in samples.
window_step : int, optional (default=128)
How far the window steps after each calculation, in samples.
copy : bool, optional (default=True)
Whether to make a copy of the input array or allow in place changes.
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
# Padding
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
clipping_factor = 0.6
b, a = sg.butter(10, np.pi * 9 / 40)
voiced_unvoiced = np.zeros((n_windows, 1))
period = np.zeros((n_windows, 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
XX *= sg.hamming(len(XX))
XX = sg.lfilter(b, a, XX)
left_max = np.max(np.abs(XX[:len(XX) // 3]))
right_max = np.max(np.abs(XX[-len(XX) // 3:]))
clip_value = clipping_factor * np.min([left_max, right_max])
        # clip symmetrically at +/- clip_value (argument order fixed so that
        # a_min <= a_max)
        XX_clip = np.clip(XX, -clip_value, clip_value)
XX_corr = np.correlate(XX_clip, XX_clip, mode='full')
center = np.argmax(XX_corr)
right_XX_corr = XX_corr[center:]
prev_window = max([window - 1, 0])
if voiced_unvoiced[prev_window] > 0:
# Want it to be harder to turn off than turn on
strength_factor = .29
else:
strength_factor = .3
start = np.where(right_XX_corr < .3 * XX_corr[center])[0]
# 20 is hardcoded but should depend on samplerate?
try:
start = np.max([20, start[0]])
except IndexError:
start = 20
search_corr = right_XX_corr[start:]
index = np.argmax(search_corr)
second_max = search_corr[index]
if (second_max > strength_factor * XX_corr[center]):
voiced_unvoiced[window] = 1
period[window] = start + index - 1
else:
voiced_unvoiced[window] = 0
period[window] = 0
return np.array(voiced_unvoiced), np.array(period)
def lpc_analysis(X, order=8, window_step=128, window_size=2 * 128,
emphasis=0.9, voiced_start_threshold=.9,
voiced_stop_threshold=.6, truncate=False, copy=True):
"""
Extract LPC coefficients from a signal
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
    Parameters
----------
X : ndarray
Signals to extract LPC coefficients from
order : int, optional (default=8)
Order of the LPC coefficients. For speech, use the general rule that the
order is two times the expected number of formants plus 2.
This can be formulated as 2 + 2 * (fs // 2000). For approx. signals
with fs = 7000, this is 8 coefficients - 2 + 2 * (7000 // 2000).
window_step : int, optional (default=128)
The size (in samples) of the space between each window
window_size : int, optional (default=2 * 128)
The size of each window (in samples) to extract coefficients over
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
voiced_start_threshold : float, optional (default=0.9)
Upper power threshold for estimating when speech has started
voiced_stop_threshold : float, optional (default=0.6)
Lower power threshold for estimating when speech has stopped
truncate : bool, optional (default=False)
Whether to cut the data at the last window or do zero padding.
copy : bool, optional (default=True)
Whether to copy the input X or modify in place
Returns
-------
lp_coefficients : ndarray
lp coefficients to describe the frame
per_frame_gain : ndarray
calculated gain for each frame
residual_excitation : ndarray
        leftover energy which is not described by lp coefficients and gain
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
X = np.array(X, copy=copy)
if len(X.shape) < 2:
X = X[None]
n_points = X.shape[1]
n_windows = n_points // window_step
if not truncate:
pad_sizes = [(window_size - window_step) // 2,
window_size - window_step // 2]
# TODO: Handling for odd window sizes / steps
X = np.hstack((np.zeros((X.shape[0], pad_sizes[0])), X,
np.zeros((X.shape[0], pad_sizes[1]))))
else:
pad_sizes = [0, 0]
X = X[0, :n_windows * window_step]
lp_coefficients = np.zeros((n_windows, order + 1))
per_frame_gain = np.zeros((n_windows, 1))
residual_excitation = np.zeros(
((n_windows - 1) * window_step + window_size))
# Pre-emphasis high-pass filter
X = sg.lfilter([1, -emphasis], 1, X)
# stride_tricks.as_strided?
autocorr_X = np.zeros((n_windows, 2 * window_size - 1))
for window in range(max(n_windows - 1, 1)):
XX = X.ravel()[window * window_step + np.arange(window_size)]
WXX = XX * sg.hanning(window_size)
autocorr_X[window] = np.correlate(WXX, WXX, mode='full')
center = np.argmax(autocorr_X[window])
RXX = autocorr_X[window,
np.arange(center, window_size + order)]
R = linalg.toeplitz(RXX[:-1])
solved_R = linalg.pinv(R).dot(RXX[1:])
filter_coefs = np.hstack((1, -solved_R))
residual_signal = sg.lfilter(filter_coefs, 1, WXX)
gain = np.sqrt(np.mean(residual_signal ** 2))
lp_coefficients[window] = filter_coefs
per_frame_gain[window] = gain
assign_range = window * window_step + np.arange(window_size)
residual_excitation[assign_range] += residual_signal / gain
# Throw away first part in overlap mode for proper synthesis
residual_excitation = residual_excitation[pad_sizes[0]:]
return lp_coefficients, per_frame_gain, residual_excitation
def lpc_to_frequency(lp_coefficients, per_frame_gain):
"""
Extract resonant frequencies and magnitudes from LPC coefficients and gains.
Parameters
----------
lp_coefficients : ndarray
LPC coefficients, such as those calculated by ``lpc_analysis``
per_frame_gain : ndarray
Gain calculated for each frame, such as those calculated
by ``lpc_analysis``
Returns
-------
frequencies : ndarray
Resonant frequencies calculated from LPC coefficients and gain. Returned
frequencies are from 0 to 2 * pi
magnitudes : ndarray
Magnitudes of resonant frequencies
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
n_windows, order = lp_coefficients.shape
frame_frequencies = np.zeros((n_windows, (order - 1) // 2))
frame_magnitudes = np.zeros_like(frame_frequencies)
for window in range(n_windows):
w_coefs = lp_coefficients[window]
g_coefs = per_frame_gain[window]
roots = np.roots(np.hstack(([1], w_coefs[1:])))
# Roots doesn't return the same thing as MATLAB... agh
frequencies, index = np.unique(
np.abs(np.angle(roots)), return_index=True)
# Make sure 0 doesn't show up...
gtz = np.where(frequencies > 0)[0]
frequencies = frequencies[gtz]
index = index[gtz]
        magnitudes = g_coefs / (1. - np.abs(roots[index]))
sort_index = np.argsort(frequencies)
frame_frequencies[window, :len(sort_index)] = frequencies[sort_index]
frame_magnitudes[window, :len(sort_index)] = magnitudes[sort_index]
return frame_frequencies, frame_magnitudes
def lpc_to_lsf(all_lpc):
if len(all_lpc.shape) < 2:
all_lpc = all_lpc[None]
order = all_lpc.shape[1] - 1
all_lsf = np.zeros((len(all_lpc), order))
for i in range(len(all_lpc)):
lpc = all_lpc[i]
lpc1 = np.append(lpc, 0)
lpc2 = lpc1[::-1]
sum_filt = lpc1 + lpc2
diff_filt = lpc1 - lpc2
if order % 2 != 0:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff, _ = sg.deconvolve(diff_filt, [1, -1])
deconv_sum, _ = sg.deconvolve(sum_filt, [1, 1])
roots_diff = np.roots(deconv_diff)
roots_sum = np.roots(deconv_sum)
angle_diff = np.angle(roots_diff[::2])
angle_sum = np.angle(roots_sum[::2])
lsf = np.sort(np.hstack((angle_diff, angle_sum)))
if len(lsf) != 0:
all_lsf[i] = lsf
return np.squeeze(all_lsf)
def lsf_to_lpc(all_lsf):
if len(all_lsf.shape) < 2:
all_lsf = all_lsf[None]
order = all_lsf.shape[1]
all_lpc = np.zeros((len(all_lsf), order + 1))
for i in range(len(all_lsf)):
lsf = all_lsf[i]
zeros = np.exp(1j * lsf)
sum_zeros = zeros[::2]
diff_zeros = zeros[1::2]
sum_zeros = np.hstack((sum_zeros, np.conj(sum_zeros)))
diff_zeros = np.hstack((diff_zeros, np.conj(diff_zeros)))
sum_filt = np.poly(sum_zeros)
diff_filt = np.poly(diff_zeros)
if order % 2 != 0:
deconv_diff = sg.convolve(diff_filt, [1, 0, -1])
deconv_sum = sum_filt
else:
deconv_diff = sg.convolve(diff_filt, [1, -1])
deconv_sum = sg.convolve(sum_filt, [1, 1])
lpc = .5 * (deconv_sum + deconv_diff)
# Last coefficient is 0 and not returned
all_lpc[i] = lpc[:-1]
return np.squeeze(all_lpc)
def lpc_synthesis(lp_coefficients, per_frame_gain, residual_excitation=None,
voiced_frames=None, window_step=128, emphasis=0.9):
"""
Synthesize a signal from LPC coefficients
Based on code from:
http://labrosa.ee.columbia.edu/matlab/sws/
http://web.uvic.ca/~tyoon/resource/auditorytoolbox/auditorytoolbox/synlpc.html
Parameters
----------
lp_coefficients : ndarray
Linear prediction coefficients
per_frame_gain : ndarray
Gain coefficients
residual_excitation : ndarray or None, optional (default=None)
Residual excitations. If None, this will be synthesized with white noise
voiced_frames : ndarray or None, optional (default=None)
Voiced frames. If None, all frames assumed to be voiced.
window_step : int, optional (default=128)
The size (in samples) of the space between each window
emphasis : float, optional (default=0.9)
The emphasis coefficient to use for filtering
Returns
-------
synthesized : ndarray
Sound vector synthesized from input arguments
References
----------
D. P. W. Ellis (2004), "Sinewave Speech Analysis/Synthesis in Matlab",
Web resource, available: http://www.ee.columbia.edu/ln/labrosa/matlab/sws/
"""
# TODO: Incorporate better synthesis from
# http://eecs.oregonstate.edu/education/docs/ece352/CompleteManual.pdf
window_size = 2 * window_step
[n_windows, order] = lp_coefficients.shape
n_points = (n_windows + 1) * window_step
n_excitation_points = n_points + window_step + window_step // 2
random_state = np.random.RandomState(1999)
if residual_excitation is None:
# Need to generate excitation
if voiced_frames is None:
# No voiced/unvoiced info
voiced_frames = np.ones((lp_coefficients.shape[0], 1))
residual_excitation = np.zeros((n_excitation_points))
f, m = lpc_to_frequency(lp_coefficients, per_frame_gain)
t = np.linspace(0, 1, window_size, endpoint=False)
hanning = sg.hanning(window_size)
for window in range(n_windows):
window_base = window * window_step
index = window_base + np.arange(window_size)
if voiced_frames[window]:
sig = np.zeros_like(t)
cycles = np.cumsum(f[window][0] * t)
sig += sg.sawtooth(cycles, 0.001)
residual_excitation[index] += hanning * sig
residual_excitation[index] += hanning * 0.01 * random_state.randn(
window_size)
else:
n_excitation_points = residual_excitation.shape[0]
n_points = n_excitation_points + window_step + window_step // 2
residual_excitation = np.hstack((residual_excitation,
np.zeros(window_size)))
if voiced_frames is None:
voiced_frames = np.ones_like(per_frame_gain)
synthesized = np.zeros((n_points))
for window in range(n_windows):
window_base = window * window_step
oldbit = synthesized[window_base + np.arange(window_step)]
w_coefs = lp_coefficients[window]
if not np.all(w_coefs):
# Hack to make lfilter avoid
# ValueError: BUG: filter coefficient a[0] == 0 not supported yet
# when all coeffs are 0
w_coefs = [1]
g_coefs = voiced_frames[window] * per_frame_gain[window]
index = window_base + np.arange(window_size)
newbit = g_coefs * sg.lfilter([1], w_coefs,
residual_excitation[index])
synthesized[index] = np.hstack((oldbit, np.zeros(
(window_size - window_step))))
synthesized[index] += sg.hanning(window_size) * newbit
synthesized = sg.lfilter([1], [1, -emphasis], synthesized)
return synthesized
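# Illustrative sketch, not part of the original library: LPC analysis followed
# by resynthesis from the coefficients, per-frame gains and residual. The
# order follows the rule of thumb from the lpc_analysis docstring for 8 kHz
# speech.
def _example_lpc_roundtrip(x, fs=8000):
    order = 2 + 2 * (fs // 2000)                  # 10 for fs = 8000
    lpc, gain, residual = lpc_analysis(x, order=order,
                                       window_step=128, window_size=256)
    return lpc_synthesis(lpc, gain, residual, window_step=128)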
class base_iterator(object):
def __init__(self, list_of_containers, minibatch_size,
axis,
start_index=0,
stop_index=np.inf,
randomize=False,
make_mask=False,
one_hot_class_size=None):
self.list_of_containers = list_of_containers
self.minibatch_size = minibatch_size
self.make_mask = make_mask
self.start_index = start_index
self.stop_index = stop_index
self.randomize = randomize
self.slice_start_ = start_index
self.axis = axis
if axis not in [0, 1]:
raise ValueError("Unknown sample_axis setting %i" % axis)
self.one_hot_class_size = one_hot_class_size
self.random_state = np.random.RandomState(2017)
len0 = len(list_of_containers[0])
assert all([len(ci) == len0 for ci in list_of_containers])
if one_hot_class_size is not None:
assert len(self.one_hot_class_size) == len(list_of_containers)
def reset(self):
self.slice_start_ = self.start_index
if self.randomize:
start_ind = self.start_index
stop_ind = min(len(self.list_of_containers[0]), self.stop_index)
inds = np.arange(start_ind, stop_ind).astype("int32")
# If start index is > 0 then pull some mad hackery to only shuffle
# the end part - eg. validation set.
self.random_state.shuffle(inds)
if start_ind > 0:
orig_inds = np.arange(0, start_ind).astype("int32")
inds = np.concatenate((orig_inds, inds))
new_list_of_containers = []
for ci in self.list_of_containers:
nci = [ci[i] for i in inds]
if isinstance(ci, np.ndarray):
nci = np.array(nci)
new_list_of_containers.append(nci)
self.list_of_containers = new_list_of_containers
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
self.slice_end_ = self.slice_start_ + self.minibatch_size
if self.slice_end_ > self.stop_index:
# TODO: Think about boundary issues with weird shaped last mb
self.reset()
raise StopIteration("Stop index reached")
ind = slice(self.slice_start_, self.slice_end_)
self.slice_start_ = self.slice_end_
if self.make_mask is False:
res = self._slice_without_masks(ind)
if not all([self.minibatch_size in r.shape for r in res]):
# TODO: Check that things are even
self.reset()
raise StopIteration("Partial slice returned, end of iteration")
return res
else:
res = self._slice_with_masks(ind)
# TODO: Check that things are even
if not all([self.minibatch_size in r.shape for r in res]):
self.reset()
raise StopIteration("Partial slice returned, end of iteration")
return res
def _slice_without_masks(self, ind):
raise AttributeError("Subclass base_iterator and override this method")
def _slice_with_masks(self, ind):
raise AttributeError("Subclass base_iterator and override this method")
class list_iterator(base_iterator):
def _slice_without_masks(self, ind):
sliced_c = []
for c in self.list_of_containers:
slc = c[ind]
arr = np.asarray(slc)
sliced_c.append(arr)
if min([len(i) for i in sliced_c]) < self.minibatch_size:
self.reset()
raise StopIteration("Invalid length slice")
for n in range(len(sliced_c)):
sc = sliced_c[n]
if self.one_hot_class_size is not None:
convert_it = self.one_hot_class_size[n]
if convert_it is not None:
raise ValueError("One hot conversion not implemented")
if not isinstance(sc, np.ndarray) or sc.dtype == np.object:
maxlen = max([len(i) for i in sc])
# Assume they at least have the same internal dtype
if len(sc[0].shape) > 1:
total_shape = (maxlen, sc[0].shape[1])
elif len(sc[0].shape) == 1:
total_shape = (maxlen, 1)
else:
raise ValueError("Unhandled array size in list")
if self.axis == 0:
raise ValueError("Unsupported axis of iteration")
new_sc = np.zeros((len(sc), total_shape[0],
total_shape[1]))
new_sc = new_sc.squeeze().astype(sc[0].dtype)
else:
new_sc = np.zeros((total_shape[0], len(sc),
total_shape[1]))
new_sc = new_sc.astype(sc[0].dtype)
for m, sc_i in enumerate(sc):
new_sc[:len(sc_i), m, :] = sc_i
sliced_c[n] = new_sc
else:
# Hit this case if all sequences are the same length
if self.axis == 1:
sliced_c[n] = sc.transpose(1, 0, 2)
return sliced_c
def _slice_with_masks(self, ind):
cs = self._slice_without_masks(ind)
if self.axis == 0:
ms = [np.ones_like(c[:, 0]) for c in cs]
raise ValueError("NYI - see axis=0 case for ideas")
sliced_c = []
for n, c in enumerate(self.list_of_containers):
slc = c[ind]
for ii, si in enumerate(slc):
ms[n][ii, len(si):] = 0.
elif self.axis == 1:
ms = [np.ones_like(c[:, :, 0]) for c in cs]
sliced_c = []
for n, c in enumerate(self.list_of_containers):
slc = c[ind]
for ii, si in enumerate(slc):
ms[n][len(si):, ii] = 0.
assert len(cs) == len(ms)
return [i for sublist in list(zip(cs, ms)) for i in sublist]
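# Illustrative sketch, not part of the original library: minibatch variable
# length sequences in time-major (axis=1) layout with masks. The ragged list
# is packed through an object array internally, which relies on the older
# NumPy behaviour this library targets.
def _example_list_iterator():
    rs = np.random.RandomState(0)
    seqs = [rs.randn(l, 3).astype("float32") for l in [5, 7, 6, 8]]
    itr = list_iterator([seqs], minibatch_size=2, axis=1,
                        start_index=0, stop_index=len(seqs), make_mask=True)
    X_mb, X_mask = next(itr)
    # X_mb has shape (max_len, 2, 3); X_mask is (max_len, 2) with zeros past
    # each sequence's true length
    return X_mb, X_mask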
class audio_file_iterator(object):
def __init__(self, file_glob, minibatch_size, start_index=0,
stop_index=np.inf, make_mask=True,
randomize=True, preprocess=None,
preprocess_kwargs={}):
"""
Supports regular int, negative indexing, or float for setting
stop_index
"""
self.minibatch_size = minibatch_size
self.file_list = glob.glob(file_glob)
if len(self.file_list) == 0:
raise ValueError("Invalid file glob, no files found!")
if randomize:
self.random_state = np.random.RandomState(2177)
self.random_state.shuffle(self.file_list)
self.make_mask = make_mask
ext = self.file_list[0].split(".")[-1]
if ext == "flac":
import scikits.audiolab
def _read(fpath):
# d in -1, 1
d, fs, enc = scikits.audiolab.flacread(fpath)
return d
else:
raise ValueError("Unhandled extension %s" % ext)
self._read_file = _read
if preprocess is None:
self.preprocess_function = lambda x: x[:, None]
elif preprocess == "quantize_window":
# reslice to 2D in sets of 128, 4
n_frame = 4
n_bins = 256
if "n_frame" in preprocess_kwargs.keys():
n_frame = preprocess_kwargs["n_frame"]
if "n_bins" in preprocess_kwargs.keys():
n_bins = preprocess_kwargs["n_bins"]
def p(x):
# apply_ functions meant to operate on lists of sequences
return apply_quantize_window_preproc([x], n_bins, n_frame,
mn=-1, mx=1)[0].astype("float32")
self.preprocess_function = p
elif preprocess == "quantize":
n_bins = 256
if "n_bins" in preprocess_kwargs.keys():
n_bins = preprocess_kwargs["n_bins"]
def p(x):
# apply_ functions meant to operate on lists of sequences
return apply_quantize_preproc([x], n_bins,
mn=-1, mx=1)[0][:, None].astype("float32")
self.preprocess_function = p
if stop_index >= 1:
self.stop_index = int(min(stop_index, len(self.file_list)))
elif stop_index > 0:
# percentage
self.stop_index = int(stop_index * len(self.file_list))
elif stop_index < 0:
# negative index - must be int!
self.stop_index = len(self.file_list) + int(stop_index)
if start_index < 0:
# negative indexing
self.start_index = len(self.file_list) + start_index
elif start_index < 1:
# float
self.start_index = int(start_index * len(self.file_list))
else:
# regular
self.start_index = int(start_index)
if self.start_index >= self.stop_index:
ss = "Invalid indexes - stop "
ss += "%s <= start %s !" % (self.stop_index, self.start_index)
raise ValueError(ss)
self._current_index = self.start_index
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
s = self._current_index
e = self._current_index + self.minibatch_size
if e > self.stop_index:
raise StopIteration("End of audio file iterator reached!")
files_to_get = self.file_list[s:e]
data = [self._read_file(fp) for fp in files_to_get]
data = [self.preprocess_function(d) for d in data]
li = list_iterator([data], self.minibatch_size, axis=1, start_index=0,
stop_index=len(data), make_mask=self.make_mask)
res = next(li)
self._current_index = e
return res
def reset(self):
self._current_index = self.start_index
def get_dataset_dir(dataset_name):
""" Get dataset directory path """
    return os.sep.join(
        os.path.realpath(__file__).split(os.sep)[:-1] + [dataset_name])
def numpy_one_hot(labels_dense, n_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
labels_shape = labels_dense.shape
labels_dtype = labels_dense.dtype
labels_dense = labels_dense.ravel().astype("int32")
n_labels = labels_dense.shape[0]
index_offset = np.arange(n_labels) * n_classes
labels_one_hot = np.zeros((n_labels, n_classes))
labels_one_hot[np.arange(n_labels).astype("int32"),
labels_dense.ravel()] = 1
labels_one_hot = labels_one_hot.reshape(labels_shape+(n_classes,))
return labels_one_hot.astype(labels_dtype)
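# Minimal numpy_one_hot sketch: three integer class labels become the rows of
# a 3 x 4 one-hot matrix.
def _example_numpy_one_hot():
    labels = np.array([0, 2, 3], dtype="int32")
    one_hot = numpy_one_hot(labels, n_classes=4)
    assert one_hot.shape == (3, 4)
    assert np.all(one_hot.argmax(axis=-1) == labels)
    return one_hot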
def tokenize_ind(phrase, vocabulary):
vocabulary_size = len(vocabulary.keys())
phrase = [vocabulary[char_] for char_ in phrase]
phrase = np.array(phrase, dtype='int32').ravel()
phrase = numpy_one_hot(phrase, vocabulary_size)
return phrase
def apply_quantize_preproc(X, n_bins=256, mn=-32768, mx=32768):
def scale(x):
x = (x - mn) / (mx - mn)
return x
# Extra n because bin is reserved kwd in Python
bins = np.linspace(0, 1, n_bins)
def binn(x):
shp = x.shape
return (np.digitize(x.ravel(), bins) - 1).reshape(shp)
X = [scale(Xi) for Xi in X]
X = [binn(Xi) for Xi in X]
return X
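# apply_quantize_preproc sketch: a sine wave in (-1, 1) is scaled to [0, 1]
# and digitized into integer bin indices in [0, n_bins - 1]; mn/mx are assumed
# to bracket the input range.
def _example_apply_quantize_preproc():
    x = np.sin(np.linspace(0, 2 * np.pi, 1000))
    q, = apply_quantize_preproc([x], n_bins=256, mn=-1, mx=1)
    assert q.min() >= 0 and q.max() <= 255
    return q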
def apply_quantize_window_preproc(X, n_bins=256, n_frame=4, mn=-32768, mx=32768):
def scale(x):
# scale to 0, 1
        x = x.astype("float64")
x = (x - x.min()) / (x.max() - x.min())
return x
# Extra n because bin is reserved kwd in Python
bins = np.linspace(0, 1, n_bins)
def binn(x):
        x = x * (n_bins - 0.5)  # scale [0, 1] up to [0, n_bins - 0.5]
x = x.astype("int32").astype("float32")
return x
X = [scale(Xi) for Xi in X]
X = [binn(Xi) for Xi in X]
def _s(x):
xi = x[:len(x) - len(x) % n_frame]
# chunk it
xi = xi.reshape((-1, n_frame))
#xi = overlap(xi, 4, 1)
return xi
X_windows = []
for x in X:
xw = _s(x)
X_windows.append(xw)
X = X_windows
return X
def apply_stft_preproc(X, n_fft=128, n_step_frac=4):
n_step = n_fft // n_step_frac
def _pre(x):
X_stft = stft(x, n_fft, step=n_step)
# Power spectrum
X_mag = complex_to_abs(X_stft)
X_mag = np.log10(X_mag + 1E-9)
# unwrap phase then take delta
X_phase = complex_to_angle(X_stft)
X_phase = np.vstack((np.zeros_like(X_phase[0][None]), X_phase))
        # Prepend zeros so the network only has to predict the phase *delta*
X_phase_unwrap = np.unwrap(X_phase, axis=0)
X_phase_delta = X_phase_unwrap[1:] - X_phase_unwrap[:-1]
X_mag_phase = np.hstack((X_mag, X_phase_delta))
return X_mag_phase
X = [_pre(Xi) for Xi in X]
X_len = np.sum([len(Xi) for Xi in X])
X_sum = np.sum([Xi.sum(axis=0) for Xi in X], axis=0)
X_mean = X_sum / X_len
X_var = np.sum([np.sum((Xi - X_mean[None]) ** 2, axis=0)
for Xi in X], axis=0) / X_len
def scale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = (_x - _mean) / _var
return x
def unscale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = _x * _var + _mean
return x
X = [scale(Xi) for Xi in X]
def _re(x):
X_mag_phase = unscale(x)
X_mag = X_mag_phase[:, :n_fft // 2]
X_mag = 10 ** X_mag
X_phase_delta = X_mag_phase[:, n_fft // 2:]
# Append leading 0s for consistency
X_phase_delta = np.vstack((np.zeros_like(X_phase_delta[0][None]),
X_phase_delta))
X_phase = np.cumsum(X_phase_delta, axis=0)[:-1]
X_stft = abs_and_angle_to_complex(X_mag, X_phase)
X_r = istft(X_stft, n_fft, step=n_step, wsola=False)
return X_r
return X, _re
def apply_binned_spectrogram_preproc(X, n_fft=512, n_step_frac=10,
n_bins=10):
n_step = n_fft // n_step_frac
def _pre(x):
X_mag = np.abs(stft(x, n_fft, step=n_step))
X_mag = np.log10(X_mag + 1E-9)
return X_mag
X = [_pre(Xi) for Xi in X]
X_len = np.sum([len(Xi) for Xi in X])
X_sum = np.sum([Xi.sum(axis=0) for Xi in X], axis=0)
X_mean = X_sum / X_len
X_var = np.sum([np.sum((Xi - X_mean[None]) ** 2, axis=0)
for Xi in X], axis=0) / X_len
X_max = np.max([np.max(Xi) for Xi in X])
X_min = np.min([np.min(Xi) for Xi in X])
def scale(x):
x = (x - X_min) / (X_max - X_min)
return x
def unscale(x):
x = x * (X_max - X_min) + X_min
return x
# Extra n because bin is reserved kwd in Python
bins = np.linspace(0, 1, n_bins)
def binn(x):
shp = x.shape
return (np.digitize(x.ravel(), bins) - 1).reshape(shp)
def unbin(x):
# use middle instead of left edge?
return x / float(n_bins)
X = [scale(Xi) for Xi in X]
X = [binn(Xi) for Xi in X]
"""
import matplotlib.pyplot as plt
plt.matshow(X[0][::-1, ::-1].T)
plt.matshow(unbin(X[0])[::-1, ::-1].T)
from IPython import embed; embed()
raise ValueError()
"""
def _re(x):
X_ub = unbin(x)
X_mag = unscale(X_ub)
X_mag = 10 ** X_mag
X_s = np.hstack((X_mag, X_mag[:, ::-1]))
X_r = iterate_invert_spectrogram(X_s, n_fft, n_step)
return X_r
return X, _re
def apply_labeled_spectrogram_preproc(X, n_fft=512, n_step_frac=10,
n_bins=10):
X, _re = apply_binned_spectrogram_preproc(X, n_fft=n_fft,
n_step_frac=n_step_frac,
n_bins=n_bins)
# Make sum and var lookup for discretization of class preds
sum_lookup = defaultdict(list)
var_lookup = defaultdict(list)
sum_labels = []
var_labels = []
for Xi in X:
sums = Xi.sum(axis=1)
sum_label_set = []
for n, s in enumerate(sums):
sum_lookup[s].append(Xi[n])
sum_label_set.append(s)
sum_labels.append(sum_label_set)
# vars is reserved...
varis = Xi.var(axis=1)
var_label_set = []
for n, v in enumerate(varis):
var_lookup[v].append(Xi[n])
var_label_set.append(v)
var_labels.append(var_label_set)
class_to_sum = {k: v for k,v in enumerate(sorted(sum_lookup.keys()))}
sum_to_class = {v: k for k,v in class_to_sum.items()}
class_to_var = {k: v for k,v in enumerate(sorted(var_lookup.keys()))}
var_to_class = {v: k for k,v in class_to_var.items()}
sum_labels = [np.array([sum_to_class[ii] for ii in i]).astype("int32")
for i in sum_labels]
var_labels = [np.array([var_to_class[ii] for ii in i]).astype("int32")
for i in var_labels]
def _full_re(s, v):
assert len(s) == len(v)
# slow distance minimization loop for decode
res = []
for s_t, v_t in zip(s, v):
s_cand = sum_lookup[class_to_sum[s_t]]
v_cand = var_lookup[class_to_var[v_t]]
dist = np.zeros((len(s_cand), len(v_cand)))
            # Could do as an outer product for speed
for i, s_c in enumerate(s_cand):
s_c_norm = np.sqrt(np.sum(s_c ** 2))
for j, v_c in enumerate(v_cand):
v_c_norm = np.sqrt(np.sum(v_c ** 2))
dist_ij = np.sqrt(np.sum((s_c - v_c) ** 2))
dist_ij /= (s_c_norm * v_c_norm)
dist[i, j] = dist_ij
if any(dist.ravel() < 1E-9):
# if any are almost 0, take it!
sum_idx = np.where(dist < 1E-9)[0][0]
else:
# find which one minimizes all var scores
sum_idx = np.argmin(np.sum(dist, axis=1))
final_cand = s_cand[sum_idx]
res.append(final_cand)
res = np.asarray(res)
res = _re(res)
return res
return sum_labels, var_labels, _full_re
def apply_lpc_softmax_preproc(X, fs=8000):
# 256 @ 8khz - .032
ws = 2 ** int(np.log(0.032 * fs) / np.log(2))
window_size = ws
window_step = int(.2 * window_size)
lpc_order = 12
def _pre(x):
a, g, e = lpc_analysis(x, order=lpc_order, window_step=window_step,
window_size=window_size, emphasis=0.9,
copy=True)
a = lpc_to_lsf(a)
f_sub = np.hstack((a, g))
v, p = voiced_unvoiced(x, window_size=window_size,
window_step=window_step)
cut_len = e.shape[0] - e.shape[0] % len(a)
e = e[:cut_len]
e = e.reshape((len(a), -1))
f_full = np.hstack((a, g, v, e))
return f_sub, f_full
X = [_pre(Xi)[0] for Xi in X]
X_stack = np.vstack(X)
kmeans_results = []
random_state = np.random.RandomState(1999)
n_clust = 60
for dim in range(X_stack.shape[1]):
print("Processing dim %i of %i" % (dim + 1, X_stack.shape[1]))
# Assume some clusters will die
res = kmeans(X_stack[:, dim], n_clust * 2)
sub = list(range(len(res[0])))
random_state.shuffle(sub)
assert len(sub) > n_clust
kmeans_results.append(res[0][sub[:n_clust]])
def _vq(Xi):
Xi2 = Xi.copy()
for dim in range(X_stack.shape[1]):
idx, _ = vq(Xi[:, dim], kmeans_results[dim])
Xi2[:, dim] = idx
return Xi2
def _unvq(Xi):
"""
assumes vq indices have been cast to float32
"""
Xi2 = Xi.copy()
for dim in range(X_stack.shape[1]):
Xi2[:, dim] = kmeans_results[dim][Xi[:, dim].astype("int32")]
return Xi2
X = [_vq(Xi) for Xi in X]
def _apply(Xi):
return _vq(_pre(Xi)[0])
def _re_sub(sub):
sub = _unvq(sub)
a = sub[:, :-1]
a = lsf_to_lpc(a)
g = sub[:, -1:]
x_r = lpc_synthesis(a, g, emphasis=0.9,
window_step=window_step)
agc_x_r, _, _ = time_attack_agc(x_r, fs)
return agc_x_r
def _re_full(full):
raise ValueError("NYI")
a = full[:, :lpc_order]
        a = np.hstack((np.ones_like(a[:, 0])[:, None], a))
offset = lpc_order
g = full[:, offset:offset + 1]
offset = offset + 1
v = full[:, offset:offset + 1]
offset = offset + 1
e = full[:, offset:].ravel()
x_r = lpc_synthesis(a, g, e, voiced_frames=v,
emphasis=0.9, window_step=window_step)
agc_x_r, _, _ = time_attack_agc(x_r, fs)
return agc_x_r
return X, _apply, _re_sub
def fetch_binarized_mnist():
# public version
url = 'https://github.com/mgermain/MADE/releases/download/ICML2015/'
url += 'binarized_mnist.npz'
partial_path = get_resource_dir("binarized_mnist")
fname = "binarized_mnist.npz"
full_path = os.path.join(partial_path, fname)
if not os.path.exists(full_path):
download(url, full_path)
d = np.load(full_path)
train = d["train_data"]
valid = d["valid_data"]
test = d["test_data"]
all_ = np.concatenate((train, valid, test), axis=0)
train_indices = np.arange(train.shape[0])
valid_indices = train.shape[0] + np.arange(valid.shape[0])
test_indices = train.shape[0] + valid.shape[0] + np.arange(test.shape[0])
t = {}
t["data"] = all_.reshape((-1, 28, 28))
t["train_indices"] = train_indices
t["valid_indices"] = valid_indices
t["test_indices"] = test_indices
"""
# personal version
url = "https://dl.dropboxusercontent.com/u/15378192/binarized_mnist_%s.npy"
fname = "binarized_mnist_%s.npy"
for s in ["train", "valid", "test"]:
full_path = os.path.join(partial_path, fname % s)
if not os.path.exists(partial_path):
os.makedirs(partial_path)
if not os.path.exists(full_path):
download(url % s, full_path, progress_update_percentage=1)
"""
return t
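# Sketch of splitting the fetch_binarized_mnist() result back into the
# train/valid/test sets; calling this downloads the dataset on first use.
def _example_binarized_mnist_splits():
    mnist = fetch_binarized_mnist()
    data = mnist["data"]
    train = data[mnist["train_indices"]]
    valid = data[mnist["valid_indices"]]
    test = data[mnist["test_indices"]]
    return train, valid, test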
def fetch_nottingham():
midiread, midiwrite = midiwrap()
url = "http://www.iro.umontreal.ca/~lisa/deep/data/Nottingham.zip"
partial_path = get_resource_dir("nottingham")
full_path = os.path.join(partial_path, "Nottingham.zip")
if not os.path.exists(full_path):
download(url, full_path)
zip_ref = zipfile.ZipFile(full_path, 'r')
zip_ref.extractall(partial_path)
zip_ref.close()
key_range = (21, 109)
dt = 0.3
all_data = []
with zipfile.ZipFile(full_path, "r") as f:
for name in f.namelist():
if ".mid" not in name:
# Skip README
continue
p = os.path.join(partial_path, name)
data = midiread(p, key_range, dt).piano_roll.astype(
theano.config.floatX)
all_data.append(data)
raise ValueError("Fix to return dictionary like the rest of the fetch_")
return key_range, dt, all_data
def fetch_fruitspeech_softmax():
fs, d, wav_names = fetch_sample_speech_fruit()
def matcher(name):
return name.split("/")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
# Is it kosher to kmeans on all the data?
X, _apply, _re = apply_lpc_softmax_preproc(d)
"""
for n, Xi in enumerate(X[::8]):
di = _re(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re
return speech
def fetch_fruitspeech_spectrogram():
fs, d, wav_names = fetch_sample_speech_fruit()
def matcher(name):
return name.split("/")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
X, _re = apply_binned_spectrogram_preproc(d)
"""
for n, Xi in enumerate(X[::8]):
di = _re(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re
return speech
def fetch_fruitspeech():
#258 to get 256 in fancy log
#n_bins = 258
n_bins = 256
fs, d, wav_names = fetch_sample_speech_fruit()
def matcher(name):
return name.split("/")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
X = d
# DC center
X = [di - di.mean() for di in d]
X_max = np.max([np.max(Xi) for Xi in X]).astype("float32")
X_min = np.min([np.min(Xi) for Xi in X]).astype("float32")
def scale(x):
x = (x - X_min) / (X_max - X_min)
return x
def unscale(x):
x = x * (X_max - X_min) + X_min
return x
# Extra n because bin is reserved kwd in Python
#bins = np.linspace(0, 1, n_bins)
# 9 is 10 - 1, 10 ** 1
"""
bins = (np.logspace(0, 1, n_bins // 2) - 1) / 9
bins2 = bins / 2. + .5
bins = .5 - bins[::-1] / 2.
#want 256 bins - end up making a slightly larger bin above .5 (0 in -1, 1)
bins = np.concatenate((bins, bins2[2:]))
"""
bins = np.linspace(0, 1, n_bins)
def binn(x):
shp = x.shape
return (np.digitize(x.ravel(), bins) - 1).reshape(shp)
def unbin(x):
# use middle instead of left edge?
return np.array([bins[xi] for xi in x]).astype("float32")
X = [scale(Xi) for Xi in X]
X = [binn(Xi) for Xi in X]
def _re(x):
X_ub = unbin(x)
X_r = unscale(X_ub)
return X_r
"""
for n, Xi in enumerate(X[::8]):
di = _re(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re
return speech
def fetch_fruitspeech_spectral():
fs, d, wav_names = fetch_sample_speech_fruit()
def matcher(name):
return name.split("/")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
n_fft = 128
n_step = n_fft // 4
def _pre(x):
X_stft = stft(x, n_fft, step=n_step)
# Power spectrum
X_mag = complex_to_abs(X_stft)
X_mag = np.log10(X_mag + 1E-9)
# unwrap phase then take delta
X_phase = complex_to_angle(X_stft)
X_phase = np.vstack((np.zeros_like(X_phase[0][None]), X_phase))
        # Prepend zeros so the network only has to predict the phase *delta*
X_phase_unwrap = np.unwrap(X_phase, axis=0)
X_phase_delta = X_phase_unwrap[1:] - X_phase_unwrap[:-1]
X_mag_phase = np.hstack((X_mag, X_phase_delta))
return X_mag_phase
X = [_pre(di) for di in d]
X_len = np.sum([len(Xi) for Xi in X])
X_sum = np.sum([Xi.sum(axis=0) for Xi in X], axis=0)
X_mean = X_sum / X_len
X_var = np.sum([np.sum((Xi - X_mean[None]) ** 2, axis=0)
for Xi in X], axis=0) / X_len
def scale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = (_x - _mean) / _var
return x
def unscale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = _x * _var + _mean
return x
X = [scale(Xi) for Xi in X]
def _re(x):
X_mag_phase = unscale(x)
X_mag = X_mag_phase[:, :n_fft // 2]
X_mag = 10 ** X_mag
X_phase_delta = X_mag_phase[:, n_fft // 2:]
# Append leading 0s for consistency
X_phase_delta = np.vstack((np.zeros_like(X_phase_delta[0][None]),
X_phase_delta))
X_phase = np.cumsum(X_phase_delta, axis=0)[:-1]
X_stft = abs_and_angle_to_complex(X_mag, X_phase)
X_r = istft(X_stft, n_fft, step=n_step, wsola=False)
return X_r
"""
for n, Xi in enumerate(X[::8]):
di = _re(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re
return speech
def fetch_fruitspeech_spectrogram_nonpar():
fs, d, wav_names = fetch_sample_speech_fruit()
def matcher(name):
return name.split("/")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
data1, data2, _re = apply_labeled_spectrogram_preproc(d, n_bins=10)
"""
# Check reconstructions
for n, (s_l, v_l) in enumerate(list(zip(data1, data2))[::8]):
di = _re(s_l, v_l)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
data1_size = max([max(d1) for d1 in data1])
data2_size = max([max(d2) for d2 in data2])
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data1"] = data1
speech["data1_size"] = data1_size
speech["data2"] = data2
speech["data2_size"] = data2_size
speech["target"] = y
speech["reconstruct"] = _re
return speech
def fetch_fruitspeech_nonpar():
fs, d, wav_names = fetch_sample_speech_fruit()
def matcher(name):
return name.split("/")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
# 256 @ 8khz - .032
ws = 2 ** int(np.log(0.032 * fs) / np.log(2))
window_size = ws
window_step = int(.15 * window_size)
lpc_order = 30
def _pre(x):
a, g, e = lpc_analysis(x, order=lpc_order, window_step=window_step,
window_size=window_size, emphasis=0.9,
copy=True)
f_sub = a[:, 1:]
f_full = stft(x, window_size, window_step) #, compute_onesided=False)
"""
v, p = voiced_unvoiced(x, window_size=window_size,
window_step=window_step)
cut_len = e.shape[0] - e.shape[0] % len(a)
e = e[:cut_len]
e = e.reshape((len(a), -1))
f_full = np.hstack((a, g, v, e))
"""
if len(f_sub) >= len(f_full):
f_sub = f_sub[:len(f_full)]
else:
f_full = f_full[:len(f_sub)]
return f_sub, f_full
def _train(list_of_data):
f_sub = None
f_full = None
for i in range(len(list_of_data)):
f_sub_i, f_full_i = _pre(list_of_data[i])
if f_sub is None:
f_sub = f_sub_i
f_full = f_full_i
else:
f_sub = np.vstack((f_sub, f_sub_i))
if f_full.shape[1] > f_full_i.shape[1]:
f_full_i = np.hstack(
(f_full_i, np.zeros_like(f_full_i[:, -1][:, None])))
elif f_full_i.shape[1] > f_full.shape[1]:
f_full_i = f_full_i[:, :f_full.shape[1]]
f_full = np.vstack((f_full, f_full_i))
sub_clusters = f_sub
full_clusters = f_full
return sub_clusters, full_clusters
def _clust(x, sub_clusters, extras=None):
f_sub, f_full = _pre(x)
f_clust = f_sub
mem, _ = vq(copy.deepcopy(f_clust), copy.deepcopy(sub_clusters))
# scipy vq sometimes puts out garbage? choose one at random...
# possibly related to NaN in input
#mem[np.abs(mem) >= len(mel_clusters)] = mem[
# np.abs(mem) >= len(mel_clusters)] % len(mel_clusters)
return mem
def _re(x, sub_clusters, full_clusters):
memberships = x
vq_x = full_clusters[memberships]
"""
# STFT frames not working well in rec
x_r = iterate_invert_spectrogram(vq_x, window_size, window_step,
n_iter=50, complex_input=True)
"""
x_r = istft(vq_x, window_size, window_step, wsola=True)
"""
a = vq_x[:, :lpc_order + 1]
offset = lpc_order + 1
g = vq_x[:, offset:offset + 1]
offset = offset + 1
v = vq_x[:, offset:offset + 1]
offset = offset + 1
e = vq_x[:, offset:].ravel()
x_r = lpc_synthesis(a, g, e, voiced_frames=v,
emphasis=0.9, window_step=window_step)
"""
agc_x_r, _, _ = time_attack_agc(x_r, fs)
return agc_x_r
random_state = np.random.RandomState(1999)
all_ind = list(range(8))
# Get 5 random subsets
random_state.shuffle(all_ind)
ind = all_ind[:6]
d1 = []
for i in ind:
d1 += d[i::8]
sub_clusters, full_clusters = _train(d1)
def _re_wrap(x):
x = x.argmax(axis=-1)
re_d = _re(x, sub_clusters, full_clusters)
return re_d
def _apply(x):
m = _clust(x, sub_clusters)
return m
X = [_apply(Xi) for Xi in d]
X = [numpy_one_hot(Xi, len(sub_clusters)) for Xi in X]
"""
for n, Xi in enumerate(X[all_ind[0]::8]):
di = _re_wrap(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
for n, Xi in enumerate(X[all_ind[-1]::8]):
di = _re_wrap(Xi)
wavfile.write("to_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re_wrap
return speech
def fetch_ono():
fs, d, wav_names = fetch_sample_speech_ono()
# Force 1D
d = [di.squeeze() for di in d]
def matcher(name):
return name.split("PAIN")[1].split("_")[1]
classes = [matcher(wav_name) for wav_name in wav_names]
low = ["L1", "L2", "L3"]
uw = ["U1", "U2", "U3", "UF"]
high = ["L4", "L5", "SA"]
final_classes = []
for c in classes:
if c in low:
final_classes.append("low")
elif c in high:
final_classes.append("hi")
elif c in uw:
final_classes.append("uw")
else:
raise ValueError("Unknown class %s" % c)
classes = final_classes
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
n_fft = 128
n_step = n_fft // 4
def _pre(x):
X_stft = stft(x, n_fft, step=n_step)
# Power spectrum
X_mag = complex_to_abs(X_stft)
X_mag = np.log10(X_mag + 1E-9)
# unwrap phase then take delta
X_phase = complex_to_angle(X_stft)
X_phase = np.vstack((np.zeros_like(X_phase[0][None]), X_phase))
        # Prepend zeros so the network only has to predict the phase *delta*
X_phase_unwrap = np.unwrap(X_phase, axis=0)
X_phase_delta = X_phase_unwrap[1:] - X_phase_unwrap[:-1]
X_mag_phase = np.hstack((X_mag, X_phase_delta))
return X_mag_phase
X = [_pre(di) for di in d]
X_len = np.sum([len(Xi) for Xi in X])
X_sum = np.sum([Xi.sum(axis=0) for Xi in X], axis=0)
X_mean = X_sum / X_len
X_var = np.sum([np.sum((Xi - X_mean[None]) ** 2, axis=0)
for Xi in X], axis=0) / X_len
def scale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = (_x - _mean) / _var
return x
def unscale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = _x * _var + _mean
return x
X = [scale(Xi) for Xi in X]
def _re(x):
X_mag_phase = unscale(x)
X_mag = X_mag_phase[:, :n_fft // 2]
X_mag = 10 ** X_mag
X_phase_delta = X_mag_phase[:, n_fft // 2:]
# Append leading 0s for consistency
X_phase_delta = np.vstack((np.zeros_like(X_phase_delta[0][None]),
X_phase_delta))
X_phase = np.cumsum(X_phase_delta, axis=0)[:-1]
X_stft = abs_and_angle_to_complex(X_mag, X_phase)
X_r = istft(X_stft, n_fft, step=n_step, wsola=False)
return X_r
"""
for n, Xi in enumerate(X[:10]):
di = _re(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re
return speech
def fetch_walla():
fs, d, wav_names = fetch_sample_speech_walla()
# Force 1D
d = [di.squeeze() for di in d]
classes = ["ab" for wav_name in wav_names]
all_chars = [c for c in sorted(list(set("".join(classes))))]
char2code = {v: k for k, v in enumerate(all_chars)}
vocabulary_size = len(char2code.keys())
y = []
for n, cl in enumerate(classes):
y.append(tokenize_ind(cl, char2code))
n_fft = 128
n_step = n_fft // 4
def _pre(x):
X_stft = stft(x, n_fft, step=n_step)
# Power spectrum
X_mag = complex_to_abs(X_stft)
X_mag = np.log10(X_mag + 1E-9)
# unwrap phase then take delta
X_phase = complex_to_angle(X_stft)
X_phase = np.vstack((np.zeros_like(X_phase[0][None]), X_phase))
        # Prepend zeros so the network only has to predict the phase *delta*
X_phase_unwrap = np.unwrap(X_phase, axis=0)
X_phase_delta = X_phase_unwrap[1:] - X_phase_unwrap[:-1]
X_mag_phase = np.hstack((X_mag, X_phase_delta))
return X_mag_phase
X = [_pre(di) for di in d]
X_len = np.sum([len(Xi) for Xi in X])
X_sum = np.sum([Xi.sum(axis=0) for Xi in X], axis=0)
X_mean = X_sum / X_len
X_var = np.sum([np.sum((Xi - X_mean[None]) ** 2, axis=0)
for Xi in X], axis=0) / X_len
def scale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = (_x - _mean) / _var
return x
def unscale(x):
# WARNING: OPERATES IN PLACE!!!
# Can only realistically scale magnitude...
# Phase cost depends on circularity
x = np.copy(x)
_x = x[:, :n_fft // 2]
_mean = X_mean[None, :n_fft // 2]
_var = X_var[None, :n_fft // 2]
x[:, :n_fft // 2] = _x * _var + _mean
return x
X = [scale(Xi) for Xi in X]
def _re(x):
X_mag_phase = unscale(x)
X_mag = X_mag_phase[:, :n_fft // 2]
X_mag = 10 ** X_mag
X_phase_delta = X_mag_phase[:, n_fft // 2:]
# Append leading 0s for consistency
X_phase_delta = np.vstack((np.zeros_like(X_phase_delta[0][None]),
X_phase_delta))
X_phase = np.cumsum(X_phase_delta, axis=0)[:-1]
X_stft = abs_and_angle_to_complex(X_mag, X_phase)
X_r = istft(X_stft, n_fft, step=n_step, wsola=False)
return X_r
"""
for n, Xi in enumerate(X[:10]):
di = _re(Xi)
wavfile.write("t_%i.wav" % n, fs, soundsc(di))
raise ValueError()
"""
speech = {}
speech["vocabulary_size"] = vocabulary_size
speech["vocabulary"] = char2code
speech["sample_rate"] = fs
speech["data"] = X
speech["target"] = y
speech["reconstruct"] = _re
return speech
"""
end datasets
"""
"""
begin initializers and Theano functions
"""
def np_zeros(shape):
"""
Builds a numpy variable filled with zeros
Parameters
----------
shape, tuple of ints
shape of zeros to initialize
Returns
-------
initialized_zeros, array-like
Array-like of zeros the same size as shape parameter
"""
return np.zeros(shape).astype(theano.config.floatX)
def np_ones(shape):
"""
Builds a numpy variable filled with ones
Parameters
----------
shape, tuple of ints
shape of ones to initialize
Returns
-------
initialized_ones, array-like
Array-like of ones the same size as shape parameter
"""
return np.ones(shape).astype(theano.config.floatX)
def np_uniform(shape, random_state, scale=0.08):
"""
Builds a numpy variable filled with uniform random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 0.08)
scale to apply to uniform random values from (-1, 1)
default of 0.08 results in uniform random values in (-0.08, 0.08)
Returns
-------
initialized_uniform, array-like
Array-like of uniform random values the same size as shape parameter
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
else:
shp = shape
# Make sure bounds aren't the same
return random_state.uniform(low=-scale, high=scale, size=shp).astype(
theano.config.floatX)
def np_normal(shape, random_state, scale=0.01):
"""
Builds a numpy variable filled with normal random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 0.01)
        default of 0.01 results in normal random values with standard
        deviation 0.01
Returns
-------
initialized_normal, array-like
Array-like of normal random values the same size as shape parameter
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
else:
shp = shape
return (scale * random_state.randn(*shp)).astype(theano.config.floatX)
def np_tanh_fan_uniform(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
        default of 1. results in uniform random values
with sqrt(6 / (fan in + fan out)) scale
Returns
-------
initialized_fan, array-like
Array-like of random values the same size as shape parameter
References
----------
Understanding the difficulty of training deep feedforward neural networks
X. Glorot, Y. Bengio
"""
if type(shape[0]) is tuple:
kern_sum = np.prod(shape[0]) + np.prod(shape[1])
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
else:
kern_sum = np.sum(shape)
shp = shape
# The . after the 6 is critical! shape has dtype int...
bound = scale * np.sqrt(6. / kern_sum)
return random_state.uniform(low=-bound, high=bound,
size=shp).astype(theano.config.floatX)
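# Glorot "fan" init sketch: for a (256, 512) weight matrix the uniform bound
# is sqrt(6 / (256 + 512)) ~= 0.088, so every entry lies inside that range.
def _example_np_tanh_fan_uniform():
    rs = np.random.RandomState(0)
    W = np_tanh_fan_uniform((256, 512), rs)
    bound = np.sqrt(6. / (256 + 512))
    assert W.shape == (256, 512)
    assert np.all(np.abs(W) <= bound + 1E-6)
    return W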
def np_tanh_fan_normal(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
default of 1. results in normal random values
with sqrt(2 / (fan in + fan out)) scale
Returns
-------
initialized_fan, array-like
Array-like of random values the same size as shape parameter
References
----------
Understanding the difficulty of training deep feedforward neural networks
X. Glorot, Y. Bengio
"""
# The . after the 2 is critical! shape has dtype int...
if type(shape[0]) is tuple:
kern_sum = np.prod(shape[0]) + np.prod(shape[1])
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
else:
kern_sum = np.sum(shape)
shp = shape
var = scale * np.sqrt(2. / kern_sum)
return var * random_state.randn(*shp).astype(theano.config.floatX)
def np_sigmoid_fan_uniform(shape, random_state, scale=4.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 4.)
default of 4. results in uniform random values
with 4 * sqrt(6 / (fan in + fan out)) scale
Returns
-------
initialized_fan, array-like
Array-like of random values the same size as shape parameter
References
----------
Understanding the difficulty of training deep feedforward neural networks
X. Glorot, Y. Bengio
"""
return scale * np_tanh_fan_uniform(shape, random_state)
def np_sigmoid_fan_normal(shape, random_state, scale=4.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 4.)
default of 4. results in normal random values
with 4 * sqrt(2 / (fan in + fan out)) scale
Returns
-------
initialized_fan, array-like
Array-like of random values the same size as shape parameter
References
----------
Understanding the difficulty of training deep feedforward neural networks
X. Glorot, Y. Bengio
"""
return scale * np_tanh_fan_normal(shape, random_state)
def np_variance_scaled_uniform(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
default of 1. results in uniform random values
with 1 * sqrt(1 / (n_dims)) scale
Returns
-------
initialized_scaled, array-like
Array-like of random values the same size as shape parameter
References
----------
Efficient Backprop
Y. LeCun, L. Bottou, G. Orr, K. Muller
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
kern_sum = np.prod(shape[0])
else:
shp = shape
kern_sum = shape[0]
# Make sure bounds aren't the same
bound = scale * np.sqrt(3. / kern_sum) # sqrt(3) for std of uniform
return random_state.uniform(low=-bound, high=bound, size=shp).astype(
theano.config.floatX)
def np_variance_scaled_randn(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
default of 1. results in normal random values
with 1 * sqrt(1 / (n_dims)) scale
Returns
-------
initialized_scaled, array-like
Array-like of random values the same size as shape parameter
References
----------
Efficient Backprop
Y. LeCun, L. Bottou, G. Orr, K. Muller
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
kern_sum = np.prod(shape[0])
else:
shp = shape
kern_sum = shape[0]
# Make sure bounds aren't the same
std = scale * np.sqrt(1. / kern_sum)
return std * random_state.randn(*shp).astype(theano.config.floatX)
def np_deep_scaled_uniform(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
default of 1. results in uniform random values
with 1 * sqrt(6 / (n_dims)) scale
Returns
-------
initialized_deep, array-like
Array-like of random values the same size as shape parameter
References
----------
    Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification
K. He, X. Zhang, S. Ren, J. Sun
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
kern_sum = np.prod(shape[0])
else:
shp = shape
kern_sum = shape[0]
# Make sure bounds aren't the same
    bound = scale * np.sqrt(6. / kern_sum)  # sqrt(6 / fan_in) bound for uniform
return random_state.uniform(low=-bound, high=bound, size=shp).astype(
theano.config.floatX)
def np_deep_scaled_normal(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
default of 1. results in normal random values
with 1 * sqrt(2 / (n_dims)) scale
Returns
-------
initialized_deep, array-like
Array-like of random values the same size as shape parameter
References
----------
    Delving Deep into Rectifiers: Surpassing Human-Level Performance on
    ImageNet Classification
K. He, X. Zhang, S. Ren, J. Sun
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
kern_sum = np.prod(shape[0])
else:
shp = shape
kern_sum = shape[0]
# Make sure bounds aren't the same
    std = scale * np.sqrt(2. / kern_sum)  # sqrt(2 / fan_in) std for normal
return std * random_state.randn(*shp).astype(theano.config.floatX)
def np_ortho(shape, random_state, scale=1.):
"""
Builds a numpy variable filled with orthonormal random values
Parameters
----------
shape, tuple of ints or tuple of tuples
shape of values to initialize
tuple of ints should be single shape
tuple of tuples is primarily for convnets and should be of form
((n_in_kernels, kernel_width, kernel_height),
(n_out_kernels, kernel_width, kernel_height))
random_state, numpy.random.RandomState() object
scale, float (default 1.)
        default of 1. results in orthonormal random values scaled by 1.
Returns
-------
initialized_ortho, array-like
Array-like of random values the same size as shape parameter
References
----------
Exact solutions to the nonlinear dynamics of learning in deep linear
neural networks
A. Saxe, J. McClelland, S. Ganguli
"""
if type(shape[0]) is tuple:
shp = (shape[1][0], shape[0][0]) + shape[1][1:]
        flat_shp = (shp[0], np.prod(shp[1:]))
else:
shp = shape
flat_shp = shape
g = random_state.randn(*flat_shp)
U, S, VT = linalg.svd(g, full_matrices=False)
res = U if U.shape == flat_shp else VT # pick one with the correct shape
res = res.reshape(shp)
return (scale * res).astype(theano.config.floatX)
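# np_ortho sketch: for a square shape the result is (approximately)
# orthonormal, so W.T.dot(W) is close to the identity up to float32 precision.
def _example_np_ortho():
    rs = np.random.RandomState(0)
    W = np_ortho((64, 64), rs)
    assert np.allclose(np.dot(W.T, W), np.eye(64), atol=1E-4)
    return W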
def np_identity(shape, random_state, scale=0.98):
"""
Identity initialization for square matrices
Parameters
----------
shape, tuple of ints
shape of resulting array - shape[0] and shape[1] must match
random_state, numpy.random.RandomState() object
scale, float (default 0.98)
default of .98 results in .98 * eye initialization
Returns
-------
initialized_identity, array-like
identity initialized square matrix same size as shape
References
----------
A Simple Way To Initialize Recurrent Networks of Rectified Linear Units
Q. Le, N. Jaitly, G. Hinton
"""
assert shape[0] == shape[1]
res = np.eye(shape[0])
return (scale * res).astype(theano.config.floatX)
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
if axis < 0:
axis = tensor_list[0].ndim + axis
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
def as_shared(arr, name=None):
""" Quick wrapper for theano.shared """
    if type(arr) in [float, int]:
        if name is None:
            return theano.shared(np.cast[theano.config.floatX](arr))
        else:
            return theano.shared(np.cast[theano.config.floatX](arr),
                                 name=name)
    if name is None:
        return theano.shared(value=arr, borrow=True)
    else:
        return theano.shared(value=arr, name=name, borrow=True)
def apply_shared(list_of_numpy):
return [as_shared(arr) for arr in list_of_numpy]
def make_numpy_biases(bias_dims):
return [np_zeros((dim,)) for dim in bias_dims]
def make_biases(bias_dims):
"""
Will return as many things as are in the list of out_dims
You *must* get a list back, even for 1 element returned!
blah, = make_biases(...)
or
[blah] = make_biases(...)
"""
bs = make_numpy_biases(bias_dims)
return apply_shared(bs)
def make_numpy_weights(in_dim, out_dims, random_state, init=None,
scale="default"):
"""
Will return as many things as are in the list of out_dims
You *must* get a list back, even for 1 element returned!
blah, = make_weights(...)
or
[blah] = make_weights(...)
"""
ff = [None] * len(out_dims)
for i, out_dim in enumerate(out_dims):
        if init is None:
if in_dim == out_dim:
ff[i] = np_ortho
else:
ff[i] = np_variance_scaled_uniform
elif init == "normal":
ff[i] = np_normal
elif init == "fan":
ff[i] = np_tanh_fan_normal
elif init == "ortho":
ff[i] = np_ortho
else:
raise ValueError("Unknown init type %s" % init)
if scale == "default":
ws = [ff[i]((in_dim, out_dim), random_state)
for i, out_dim in enumerate(out_dims)]
else:
ws = [ff[i]((in_dim, out_dim), random_state, scale=scale)
for i, out_dim in enumerate(out_dims)]
return ws
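# make_numpy_weights sketch showing the "you must get a list back" rule from
# the docstring - note the trailing comma when only one matrix is requested.
def _example_make_numpy_weights():
    rs = np.random.RandomState(1999)
    W_single, = make_numpy_weights(128, [256], rs, init="fan")
    W_a, W_b = make_numpy_weights(128, [256, 64], rs, init="normal")
    assert W_single.shape == (128, 256)
    assert W_a.shape == (128, 256) and W_b.shape == (128, 64)
    return W_single, W_a, W_b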
def make_weights(in_dim, out_dims, random_state, init=None,
scale="default"):
"""
Will return as many things as are in the list of out_dims
You *must* get a list back, even for 1 element returned!
blah, = make_weights(...)
or
[blah] = make_weights(...)
"""
ws = make_numpy_weights(in_dim, out_dims, random_state, init=init,
scale=scale)
return apply_shared(ws)
def LearnedInitHidden(list_of_inputs, list_of_shapes):
# Helper to allow switch for learned hidden inits
ret = []
assert len(list_of_inputs) == len(list_of_shapes)
for i, shp in enumerate(list_of_shapes):
name = None
s = param(name, make_numpy_biases([shp[1]])[0])
ss = s[None, :] * tensor.ones((shp[0], shp[1]))
init = theano.ifelse.ifelse(tensor.abs_(ss.sum()) < 1E-12,
ss, list_of_inputs[i])
ret.append(init)
return ret
def Embedding(indices, n_symbols, output_dim, random_state, name=None):
"""
Last dimension of indices tensor must be 1!!!!
"""
vectors = param(name,
random_state.randn(
n_symbols,
output_dim
).astype(theano.config.floatX)
)
ii = indices.astype("int32")
output_shape = [
ii.shape[i]
for i in range(ii.ndim - 1)
] + [output_dim]
return vectors[ii.flatten()].reshape(output_shape)
def Linear(list_of_inputs, input_dims, output_dim, random_state, name=None,
init=None, scale="default", weight_norm=None, biases=True):
"""
Can pass weights and biases directly if needed through init
"""
if weight_norm is None:
# Let other classes delegate to default of linear
weight_norm = True
input_var = tensor.concatenate(list_of_inputs, axis=-1)
input_dim = sum(input_dims)
terms = []
if (init is None) or (type(init) is str):
weight_values, = make_numpy_weights(input_dim, [output_dim],
random_state=random_state,
init=init, scale=scale)
else:
weight_values = init[0]
weight = param((name, "W"), weight_values)
# From Ishaan G.
# http://arxiv.org/abs/1602.07868
if weight_norm:
norm_values = np.linalg.norm(weight_values, axis=0)
norms = param((name, "wn"), norm_values)
normed_weight = weight * (norms / weight.norm(2, axis=0)).dimshuffle('x', 0)
terms.append(tensor.dot(input_var, normed_weight))
else:
terms.append(tensor.dot(input_var, weight))
if biases:
if (init is None) or (type(init) is str):
b, = make_numpy_biases([output_dim])
else:
b = init[1]
terms.append(param((name, "b"), b))
out = reduce(lambda a, b: a + b, terms)
out.name = get_generic_name() + ".output"
return out
def make_conv_weights(in_dim, out_dims, kernel_size, random_state):
"""
Will return as many things as are in the list of out_dims
You *must* get a list back, even for 1 element returned!
blah, = make_conv_weights(...)
or
[blah] = make_conv_weights(...)
"""
return apply_shared([np_tanh_fan_normal(
((in_dim, kernel_size[0], kernel_size[1]),
(out_dim, kernel_size[0], kernel_size[1])), random_state)
for out_dim in out_dims])
def conv2d(input, filters, biases=None, border_mode=0, stride=(1, 1)):
"""
Light wrapper around conv2d - optionally handle biases
"""
r = nnet.conv2d(
input=input,
filters=filters,
border_mode=border_mode,
subsample=stride,
filter_flip=True)
if biases is None:
return r
else:
return r + biases.dimshuffle('x', 0, 'x', 'x')
def unpool(input, pool_size=(1, 1)):
"""
Repeat unpooling
"""
return input.repeat(pool_size[0], axis=2).repeat(pool_size[1], axis=3)
def conv2d_transpose(input, filters, biases=None, border_mode=0, stride=(1, 1)):
"""
Light wrapper around conv2d_transpose
"""
# swap to in dim out dim to make life easier
filters = filters.transpose(1, 0, 2, 3)
r = conv2d_grad_wrt_inputs(
output_grad=input,
filters=filters,
input_shape=(None, None, input.shape[2], input.shape[3]),
border_mode=border_mode,
subsample=stride,
filter_flip=True)
if biases is None:
return r
else:
return r + biases.dimshuffle('x', 0, 'x', 'x')
def t_conv_out_size(input_size, filter_size, stride, pad):
# Author: Francesco Visin
"""Computes the length of the output of a transposed convolution
Parameters
----------
input_size : int, Iterable or Theano tensor
The size of the input of the transposed convolution
filter_size : int, Iterable or Theano tensor
The size of the filter
stride : int, Iterable or Theano tensor
The stride of the transposed convolution
pad : int, Iterable, Theano tensor or string
The padding of the transposed convolution
"""
if input_size is None:
return None
input_size = np.array(input_size)
filter_size = np.array(filter_size)
stride = np.array(stride)
if isinstance(pad, (int, Iterable)) and not isinstance(pad, str):
pad = np.array(pad)
output_size = (input_size - 1) * stride + filter_size - 2*pad
elif pad == 'full':
output_size = input_size * stride - filter_size - stride + 2
elif pad == 'valid':
output_size = (input_size - 1) * stride + filter_size
elif pad == 'same':
output_size = input_size
return output_size
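# t_conv_out_size sketch: a transposed convolution over a length-8 input with
# a size-4 filter, stride 2 and zero padding gives (8 - 1) * 2 + 4 = 18.
def _example_t_conv_out_size():
    out = t_conv_out_size(8, 4, 2, 0)
    assert out == 18
    return out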
def gru_weights(input_dim, hidden_dim, forward_init=None, hidden_init="normal",
random_state=None):
if random_state is None:
raise ValueError("Must pass random_state!")
shape = (input_dim, hidden_dim)
if forward_init == "normal":
W = np.hstack([np_normal(shape, random_state),
np_normal(shape, random_state),
np_normal(shape, random_state)])
elif forward_init == "fan":
W = np.hstack([np_tanh_fan_normal(shape, random_state),
np_tanh_fan_normal(shape, random_state),
np_tanh_fan_normal(shape, random_state)])
    elif forward_init is None:
if input_dim == hidden_dim:
W = np.hstack([np_ortho(shape, random_state),
np_ortho(shape, random_state),
np_ortho(shape, random_state)])
else:
# lecun
W = np.hstack([np_variance_scaled_uniform(shape, random_state),
np_variance_scaled_uniform(shape, random_state),
np_variance_scaled_uniform(shape, random_state)])
else:
raise ValueError("Unknown forward init type %s" % forward_init)
b = np_zeros((3 * shape[1],))
if hidden_init == "normal":
Wur = np.hstack([np_normal((shape[1], shape[1]), random_state),
np_normal((shape[1], shape[1]), random_state), ])
U = np_normal((shape[1], shape[1]), random_state)
elif hidden_init == "ortho":
Wur = np.hstack([np_ortho((shape[1], shape[1]), random_state),
np_ortho((shape[1], shape[1]), random_state), ])
U = np_ortho((shape[1], shape[1]), random_state)
return W, b, Wur, U
def GRU(inp, gate_inp, previous_state, input_dim, hidden_dim, random_state,
name=None, init=None, scale="default", weight_norm=None, biases=True):
    if init is None:
        hidden_init = "ortho"
else:
raise ValueError("Not yet configured for other inits")
_, _, Wur, U = gru_weights(input_dim, hidden_dim,
hidden_init=hidden_init,
random_state=random_state)
dim = hidden_dim
f1 = Linear([previous_state], [2 * hidden_dim], 2 * hidden_dim,
random_state, name=(name, "update/reset"), init=[Wur],
biases=False)
gates = tensor.nnet.sigmoid(f1 + gate_inp)
update = gates[:, :dim]
reset = gates[:, dim:]
state_reset = previous_state * reset
f2 = Linear([state_reset], [hidden_dim], hidden_dim,
random_state, name=(name, "state"), init=[U], biases=False)
next_state = tensor.tanh(f2 + inp)
next_state = next_state * update + previous_state * (1 - update)
return next_state
def GRUFork(list_of_inputs, input_dims, output_dim, random_state, name=None,
init=None, scale="default", weight_norm=None, biases=True):
gates = Linear(list_of_inputs, input_dims, 3 * output_dim,
random_state=random_state,
name=(name, "gates"), init=init, scale=scale,
weight_norm=weight_norm,
biases=biases)
dim = output_dim
if gates.ndim == 2:
d = gates[:, :dim]
g = gates[:, dim:]
elif gates.ndim == 3:
d = gates[:, :, :dim]
g = gates[:, :, dim:]
else:
raise ValueError("Unsupported ndim")
return d, g
def logsumexp(x, axis=None):
x_max = tensor.max(x, axis=axis, keepdims=True)
z = tensor.log(tensor.sum(tensor.exp(x - x_max),
axis=axis, keepdims=True)) + x_max
return z.sum(axis=axis)
def softmax(X):
# should work for both 2D and 3D
dim = X.ndim
e_X = tensor.exp(X - X.max(axis=dim - 1, keepdims=True))
out = e_X / e_X.sum(axis=dim - 1, keepdims=True)
return out
def numpy_softmax(X, temperature=1.):
# should work for both 2D and 3D
dim = X.ndim
X = X / temperature
e_X = np.exp((X - X.max(axis=dim - 1, keepdims=True)))
out = e_X / e_X.sum(axis=dim - 1, keepdims=True)
return out
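# numpy_softmax sketch: rows always sum to 1, and a lower temperature sharpens
# the distribution toward the argmax.
def _example_numpy_softmax():
    logits = np.array([[1., 2., 3.]])
    p = numpy_softmax(logits)
    p_sharp = numpy_softmax(logits, temperature=0.1)
    assert np.allclose(p.sum(axis=-1), 1.)
    assert p_sharp[0, 2] > p[0, 2]
    return p, p_sharp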
def elu(x, alpha=1):
"""
Compute the element-wise exponential linear activation function.
    From Theano 0.8.0 - here for backwards compat
"""
return tensor.switch(x > 0, x, alpha * (tensor.exp(x) - 1))
def relu(x):
return x * (x > 1e-6)
def tanh(x):
return tensor.tanh(x)
def sigmoid(x):
return tensor.nnet.sigmoid(x)
def theano_one_hot(t, n_classes=None):
if n_classes is None:
n_classes = tensor.max(t) + 1
ranges = tensor.shape_padleft(tensor.arange(n_classes), t.ndim)
return tensor.eq(ranges, tensor.shape_padright(t, 1))
def binary_crossentropy(predicted_values, true_values):
"""
Bernoulli negative log likelihood of predicted compared to binary
true_values
Parameters
----------
predicted_values : tensor, shape 2D or 3D
The predicted values out of some layer, normally a sigmoid_layer
true_values : tensor, shape 2D or 3D
        The ground truth values. Must have the same shape as predicted_values
Returns
-------
binary_crossentropy : tensor, shape predicted_values.shape[1:]
The cost per sample, or per sample per step if 3D
"""
return (-true_values * tensor.log(predicted_values) - (
1 - true_values) * tensor.log(1 - predicted_values)).sum(axis=-1)
def categorical_crossentropy(predicted_values, true_values, eps=0.):
"""
Multinomial negative log likelihood of predicted compared to one hot
true_values
Parameters
----------
predicted_values : tensor, shape 2D or 3D
The predicted class probabilities out of some layer,
normally the output of a softmax
true_values : tensor, shape 2D or 3D
Ground truth one hot values
eps : float, default 0
Epsilon to be added during log calculation to avoid NaN values.
Returns
-------
categorical_crossentropy : tensor, shape predicted_values.shape[1:]
The cost per sample, or per sample per step if 3D
"""
tv = true_values.reshape((-1, true_values.shape[-1]))
indices = tensor.argmax(tv, axis=-1)
rows = tensor.arange(true_values.shape[0])
if eps > 0:
p = tensor.cast(predicted_values, theano.config.floatX) + eps
p /= tensor.sum(p, axis=predicted_values.ndim - 1, keepdims=True)
else:
p = tensor.cast(predicted_values, theano.config.floatX)
if predicted_values.ndim < 3:
return -tensor.log(p)[rows, indices]
elif predicted_values.ndim >= 3:
shp = predicted_values.shape
pred = p.reshape((-1, shp[-1]))
ind = indices.reshape((-1,))
s = tensor.arange(pred.shape[0])
correct = -tensor.log(pred)[s, ind]
return correct.reshape(shp[:-1])
def sample_binomial(coeff, n_bins, theano_rng, debug=False):
# ? Normal approximation?
if coeff.ndim > 2:
raise ValueError("Unsupported dim")
if debug:
idx = coeff * n_bins
else:
shp = coeff.shape
inc = tensor.ones((shp[0], shp[1], n_bins))
expanded_coeff = coeff.dimshuffle(0, 1, 'x')
expanded_coeff = expanded_coeff * inc
# n > 1 not supported?
# idx = theano_rng.binomial(n=n_bins, p=coeff, dtype=coeff.dtype)
idx = theano_rng.binomial(n=1, p=expanded_coeff, dtype=coeff.dtype,
size=expanded_coeff.shape)
idx = idx.sum(axis=-1)
return tensor.cast(idx, theano.config.floatX)
def sample_softmax(coeff, theano_rng, epsilon=1E-5, debug=False):
if coeff.ndim > 2:
raise ValueError("Unsupported dim")
if debug:
idx = coeff.argmax(axis=1)
else:
idx = tensor.argmax(theano_rng.multinomial(pvals=coeff, dtype=coeff.dtype),
axis=1)
return tensor.cast(idx, theano.config.floatX)
def numpy_sample_softmax(coeff, random_state, debug=False):
if coeff.ndim > 2:
raise ValueError("Unsupported dim")
if debug:
idx = coeff.argmax(axis=1)
else:
# renormalize to avoid numpy errors about summation...
coeff = coeff / (coeff.sum(axis=1, keepdims=True) + 1E-6)
idxs = [np.argmax(random_state.multinomial(1, pvals=coeff[i]))
for i in range(len(coeff))]
idx = np.array(idxs)
return idx.astype(theano.config.floatX)
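# numpy_sample_softmax sketch: draws one class index per row of a
# (batch, n_classes) probability matrix; debug=True falls back to argmax.
def _example_numpy_sample_softmax():
    rs = np.random.RandomState(2177)
    coeff = numpy_softmax(rs.randn(5, 10).astype("float32"))
    idx = numpy_sample_softmax(coeff, rs)
    assert idx.shape == (5,)
    assert np.all((idx >= 0) & (idx <= 9))
    return idx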
def sample_diagonal_gmm(mu, sigma, coeff, theano_rng, epsilon=1E-5,
debug=False):
if debug:
idx = tensor.argmax(coeff, axis=1)
else:
idx = tensor.argmax(
theano_rng.multinomial(pvals=coeff, dtype=coeff.dtype), axis=1)
mu = mu[tensor.arange(mu.shape[0]), :, idx]
sigma = sigma[tensor.arange(sigma.shape[0]), :, idx]
if debug:
z = 0.
else:
z = theano_rng.normal(size=mu.shape, avg=0., std=1., dtype=mu.dtype)
s = mu + sigma * z
return s
def sample_single_dimensional_gmms(mu, sigma, coeff, theano_rng, epsilon=1E-5,
debug=False):
if debug:
idx = tensor.argmax(coeff, axis=1)
else:
idx = tensor.argmax(
theano_rng.multinomial(pvals=coeff, dtype=coeff.dtype), axis=1)
mu = mu[tensor.arange(mu.shape[0]), :, idx]
sigma = sigma[tensor.arange(sigma.shape[0]), :, idx]
if debug:
z = 0.
else:
z = theano_rng.normal(size=mu.shape, avg=0., std=1., dtype=mu.dtype)
s = mu + sigma * z
return s
def diagonal_gmm(true, mu, sigma, coeff, epsilon=1E-5):
n_dim = true.ndim
shape_t = true.shape
true = true.reshape((-1, shape_t[-1]))
true = true.dimshuffle(0, 1, 'x')
inner = tensor.log(2 * np.pi) + 2 * tensor.log(sigma)
inner += tensor.sqr((true - mu) / sigma)
inner = -0.5 * tensor.sum(inner, axis=1)
nll = -logsumexp(tensor.log(coeff) + inner, axis=1)
nll = nll.reshape(shape_t[:-1], ndim=n_dim-1)
return nll
def diagonal_phase_gmm(true, mu, sigma, coeff, epsilon=1E-5):
n_dim = true.ndim
shape_t = true.shape
true = true.reshape((-1, shape_t[-1]))
true = true.dimshuffle(0, 1, 'x')
inner0 = np.pi - abs(tensor.mod(true - mu, 2 * np.pi) - np.pi)
inner = tensor.log(2 * np.pi) + 2 * tensor.log(sigma)
inner += tensor.sqr(inner0 / sigma)
inner = -0.5 * tensor.sum(inner, axis=1)
nll = -logsumexp(tensor.log(coeff) + inner, axis=1)
nll = nll.reshape(shape_t[:-1], ndim=n_dim-1)
return nll
def single_dimensional_gmms(true, mu, sigma, coeff, epsilon=1E-5):
shape_t = true.shape
true = true.reshape((-1, shape_t[-1]))
true = true.dimshuffle(0, 1, 'x')
inner = tensor.log(2 * np.pi) + 2 * tensor.log(sigma)
inner += tensor.sqr((true - mu) / sigma)
inner = -0.5 * inner
nll = -logsumexp(tensor.sum(tensor.log(coeff) + inner, axis=1), axis=1)
nll = nll.reshape((shape_t[0], shape_t[1]))
return nll
def single_dimensional_phase_gmms(true, mu, sigma, coeff, epsilon=1E-5):
shape_t = true.shape
true = true.reshape((-1, shape_t[-1]))
true = true.dimshuffle(0, 1, 'x')
inner0 = np.pi - abs(tensor.mod(true - mu, 2 * np.pi) - np.pi)
inner = tensor.log(2 * np.pi) + 2 * tensor.log(sigma)
inner += tensor.sqr(inner0 / sigma)
inner = -0.5 * inner
nll = -logsumexp(tensor.sum(tensor.log(coeff) + inner, axis=1), axis=1)
nll = nll.reshape((shape_t[0], shape_t[1]))
return nll
def bernoulli_and_bivariate_gmm(true, mu, sigma, corr, coeff, binary,
epsilon=1E-5):
n_dim = true.ndim
shape_t = true.shape
true = true.reshape((-1, shape_t[-1]))
true = true.dimshuffle(0, 1, 'x')
mu_1 = mu[:, 0, :]
mu_2 = mu[:, 1, :]
sigma_1 = sigma[:, 0, :]
sigma_2 = sigma[:, 1, :]
binary = (binary + epsilon) * (1 - 2 * epsilon)
c_b = tensor.sum(tensor.xlogx.xlogy0(true[:, 0], binary) + tensor.xlogx.xlogy0(
1 - true[:, 0], 1 - binary), axis=1)
inner1 = (0.5 * tensor.log(1. - corr ** 2 + epsilon))
inner1 += tensor.log(sigma_1) + tensor.log(sigma_2)
inner1 += tensor.log(2. * np.pi)
t1 = true[:, 1]
t2 = true[:, 2]
Z = (((t1 - mu_1)/sigma_1)**2) + (((t2 - mu_2) / sigma_2)**2)
Z -= (2. * (corr * (t1 - mu_1)*(t2 - mu_2)) / (sigma_1 * sigma_2))
inner2 = 0.5 * (1. / (1. - corr**2 + epsilon))
cost = - (inner1 + (inner2 * Z))
nll = -logsumexp(tensor.log(coeff) + cost, axis=1)
nll -= c_b
return nll.reshape(shape_t[:-1], ndim=n_dim-1)
def sample_bernoulli_and_bivariate_gmm(mu, sigma, corr, coeff, binary,
theano_rng, epsilon=1E-5):
idx = tensor.argmax(theano_rng.multinomial(pvals=coeff, dtype=coeff.dtype),
axis=1)
mu = mu[tensor.arange(mu.shape[0]), :, idx]
sigma = sigma[tensor.arange(sigma.shape[0]), :, idx]
corr = corr[tensor.arange(corr.shape[0]), idx]
mu_x = mu[:, 0]
mu_y = mu[:, 1]
sigma_x = sigma[:, 0]
sigma_y = sigma[:, 1]
z = theano_rng.normal(size=mu.shape, avg=0., std=1., dtype=mu.dtype)
un = theano_rng.uniform(size=binary.shape)
binary = tensor.cast(un < binary, theano.config.floatX)
s_x = (mu_x + sigma_x * z[:, 0]).dimshuffle(0, 'x')
s_y = mu_y + sigma_y * (
(z[:, 0] * corr) + (z[:, 1] * tensor.sqrt(1. - corr ** 2)))
s_y = s_y.dimshuffle(0, 'x')
s = tensor.concatenate([binary, s_x, s_y], axis=1)
return s
"""
end initializers and Theano functions
"""
"""
start optimizers
"""
def gradient_clipping(grads, rescale=5.):
grad_norm = tensor.sqrt(sum(map(lambda x: tensor.sqr(x).sum(), grads)))
scaling_num = rescale
scaling_den = tensor.maximum(rescale, grad_norm)
scaling = scaling_num / scaling_den
return [g * scaling for g in grads]
class sgd(object):
"""
Vanilla SGD
"""
def __init__(self, params, learning_rate):
self.learning_rate = as_shared(learning_rate)
def updates(self, params, grads):
learning_rate = self.learning_rate
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
updates.append((param, param - learning_rate * grad))
return updates
class sgd_momentum(object):
"""
SGD with momentum
"""
def __init__(self, params, learning_rate, momentum):
self.learning_rate = as_shared(learning_rate)
self.momentum = momentum
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads):
learning_rate = self.learning_rate
momentum = self.momentum
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
memory = self.memory_[n]
updates.append((param, param - learning_rate * grad))
updates.append((memory, momentum * memory + (1. - momentum) * grad))
return updates
class sgd_nesterov(object):
"""
SGD with nesterov momentum
Based on example from Yann D.
See Formula 7 from
Advances in Optimizing Recurrent Neural Networks
Y. Bengio, N. Boulanger-Lewandowski, R. Pascanu
"""
def __init__(self, params, learning_rate, momentum):
self.learning_rate = as_shared(learning_rate)
self.momentum = momentum
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads):
learning_rate = self.learning_rate
momentum = self.momentum
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
memory = self.memory_[n]
update = momentum * memory - learning_rate * grad
update2 = momentum * momentum * memory - (
1 + momentum) * learning_rate * grad
updates.append((memory, update))
updates.append((param, param + update2))
return updates
class rmsprop(object):
"""
RMSProp with nesterov momentum and gradient rescaling
"""
def __init__(self, params, learning_rate, momentum, rescale=5.):
self.learning_rate = as_shared(learning_rate)
self.momentum = momentum
self.rescale = rescale
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.squared_memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.momentum_memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads):
learning_rate = self.learning_rate
momentum = self.momentum
rescale = self.rescale
grad_norm = tensor.sqrt(sum(map(lambda x: tensor.sqr(x).sum(), grads)))
scaling_num = rescale
scaling_den = tensor.maximum(rescale, grad_norm)
scaling = scaling_num / scaling_den
# constants, from AG "Generating Sequences with Recurrent Neural
# Networks"
decay = 0.95
minimum_grad = 1E-4
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
grad *= scaling
memory = self.memory_[n]
squared_memory = self.squared_memory_[n]
momentum_memory = self.momentum_memory_[n]
grad_gi = decay * memory + (1 - decay) * grad
decayed_ni = decay * squared_memory + (1 - decay) * grad ** 2
grad_scaled = grad / tensor.sqrt(
decayed_ni - grad_gi ** 2 + minimum_grad)
update = momentum * momentum_memory - learning_rate * grad_scaled
update2 = momentum * momentum * momentum_memory - (
1 + momentum) * learning_rate * grad_scaled
updates.append((memory, grad_gi))
updates.append((squared_memory, decayed_ni))
updates.append((momentum_memory, update))
updates.append((param, param + update2))
return updates
class adagrad(object):
"""
Adagrad optimizer
"""
def __init__(self, params, learning_rate, eps=1E-8):
self.learning_rate = as_shared(learning_rate)
self.eps = eps
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads):
learning_rate = self.learning_rate
eps = self.eps
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
memory = self.memory_[n]
m_t = memory + grad ** 2
g_t = grad / (eps + tensor.sqrt(m_t))
p_t = param - learning_rate * g_t
updates.append((memory, m_t))
updates.append((param, p_t))
return updates
class adadelta(object):
"""
An adaptive learning rate optimizer
For more information, see:
Matthew D. Zeiler, "ADADELTA: An Adaptive Learning Rate Method"
arXiv:1212.5701.
"""
def __init__(self, params, running_grad_decay=0.95, running_up_decay=0.95,
eps=1E-6):
self.running_grad_decay = running_grad_decay
self.running_up_decay = running_up_decay
self.eps = eps
self.running_up2_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.running_grads2_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.previous_grads_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
def updates(self, params, grads):
running_grad_decay = self.running_grad_decay
running_up_decay = self.running_up_decay
eps = self.eps
updates = []
for n, (param, grad) in enumerate(zip(params, grads)):
running_grad2 = self.running_grads2_[n]
running_up2 = self.running_up2_[n]
previous_grad = self.previous_grads_[n]
rg2up = running_grad_decay * running_grad2 + (
1. - running_grad_decay) * (grad ** 2)
updir = -tensor.sqrt(running_up2 + eps) / tensor.sqrt(
running_grad2 + eps) * previous_grad
ru2up = running_up_decay * running_up2 + (
1. - running_up_decay) * (updir ** 2)
updates.append((previous_grad, grad))
updates.append((running_grad2, rg2up))
updates.append((running_up2, ru2up))
updates.append((param, param + updir))
return updates
class adam(object):
"""
Adam optimizer
Based on implementation from @NewMu / Alex Radford
"""
def __init__(self, params, learning_rate, b1=0.1, b2=0.001, eps=1E-8):
self.learning_rate = as_shared(learning_rate)
self.b1 = b1
self.b2 = b2
self.eps = eps
self.memory_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.velocity_ = [theano.shared(np.zeros_like(p.get_value()))
for p in params]
self.itr_ = theano.shared(np.array(0.).astype(theano.config.floatX))
def updates(self, params, grads):
learning_rate = self.learning_rate
b1 = self.b1
b2 = self.b2
eps = self.eps
updates = []
itr = self.itr_
i_t = itr + 1.
fix1 = 1. - (1. - b1) ** i_t
fix2 = 1. - (1. - b2) ** i_t
lr_t = learning_rate * (tensor.sqrt(fix2) / fix1)
for n, (param, grad) in enumerate(zip(params, grads)):
memory = self.memory_[n]
velocity = self.velocity_[n]
m_t = (b1 * grad) + ((1. - b1) * memory)
v_t = (b2 * tensor.sqr(grad)) + ((1. - b2) * velocity)
g_t = m_t / (tensor.sqrt(v_t) + eps)
p_t = param - (lr_t * g_t)
updates.append((memory, m_t))
updates.append((velocity, v_t))
updates.append((param, p_t))
updates.append((itr, i_t))
return updates
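# Minimal sketch of how the optimizer classes above are meant to be driven.
# Nothing in this file calls this helper; `params`, `cost`, `X_sym` and
# `y_sym` are assumed to come from the caller's model graph.
def example_training_function(params, cost, X_sym, y_sym):
    grads = tensor.grad(cost, params)
    # Optional, but mirrors the rescaling rmsprop applies internally.
    grads = gradient_clipping(grads, rescale=5.)
    opt = adam(params, learning_rate=0.0002)
    updates = opt.updates(params, grads)
    # Each call to the compiled function then performs one training step.
    return theano.function([X_sym, y_sym], cost, updates=updates)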
"""
end optimizers
"""
"""
start training utilities
"""
def get_shared_variables_from_function(func):
shared_variable_indices = [n for n, var in enumerate(func.maker.inputs)
if isinstance(var.variable,
theano.compile.SharedVariable)]
shared_variables = [func.maker.inputs[i].variable
for i in shared_variable_indices]
return shared_variables
def get_values_from_function(func):
return [v.get_value() for v in get_shared_variables_from_function(func)]
def safe_zip(*args):
"""Like zip, but ensures arguments are of same length.
Borrowed from pylearn2
"""
base = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != base:
raise ValueError("Argument 0 has length %d but argument %d has "
"length %d" % (base, i+1, len(arg)))
return zip(*args)
def set_shared_variables_in_function(func, list_of_values):
# TODO : Add checking that sizes are OK
shared_variable_indices = [n for n, var in enumerate(func.maker.inputs)
if isinstance(var.variable,
theano.compile.SharedVariable)]
shared_variables = [func.maker.inputs[i].variable
for i in shared_variable_indices]
[s.set_value(v) for s, v in safe_zip(shared_variables, list_of_values)]
def save_weights(save_path, items_dict, use_resource_dir=True):
logger.info("Not saving weights due to copy issues in npz")
return
weights_dict = {}
# k is the function name, v is a theano function
for k, v in items_dict.items():
if isinstance(v, theano.compile.function_module.Function):
# w is all the numpy values from a function
w = get_values_from_function(v)
for n, w_v in enumerate(w):
weights_dict[k + "_%i" % n] = w_v
if use_resource_dir:
# Assume it ends with .py ...
script_name = get_script_name()[:-3]
save_path = os.path.join(get_resource_dir(script_name), save_path)
logger.info("Saving weights to %s" % save_weights_path)
if len(weights_dict.keys()) > 0:
np.savez(save_path, **weights_dict)
else:
logger.info("Possible BUG: no theano functions found in items_dict, "
"unable to save weights!")
logger.info("Weight saving complete %s" % save_path)
@coroutine
def threaded_weights_writer(maxsize=25):
"""
Expects to be sent a tuple of (save_path, checkpoint_dict)
"""
messages = Queue.PriorityQueue(maxsize=maxsize)
def run_thread():
while True:
p, item = messages.get()
if item is GeneratorExit:
return
else:
save_path, items_dict = item
save_weights(save_path, items_dict)
threading.Thread(target=run_thread).start()
try:
n = 0
while True:
item = (yield)
messages.put((n, item))
n -= 1
except GeneratorExit:
messages.put((1, GeneratorExit))
def save_checkpoint(save_path, pickle_item, use_resource_dir=True):
if use_resource_dir:
# Assume it ends with .py ...
script_name = get_script_name()[:-3]
save_path = os.path.join(get_resource_dir(script_name), save_path)
sys.setrecursionlimit(40000)
logger.info("Saving checkpoint to %s" % save_path)
with open(save_path, mode="wb") as f:
pickle.dump(pickle_item, f, protocol=-1)
logger.info("Checkpoint saving complete %s" % save_path)
@coroutine
def threaded_checkpoint_writer(maxsize=25):
"""
Expects to be sent a tuple of (save_path, checkpoint_dict)
"""
messages = Queue.PriorityQueue(maxsize=maxsize)
def run_thread():
while True:
p, item = messages.get()
if item is GeneratorExit:
return
else:
save_path, pickle_item = item
save_checkpoint(save_path, pickle_item)
threading.Thread(target=run_thread).start()
try:
n = 0
while True:
item = (yield)
messages.put((n, item))
n -= 1
except GeneratorExit:
messages.put((1, GeneratorExit))
def load_checkpoint(saved_checkpoint_path):
logger.info("Loading checkpoint from %s" % saved_checkpoint_path)
old_recursion_limit = sys.getrecursionlimit()
sys.setrecursionlimit(40000)
with open(saved_checkpoint_path, mode="rb") as f:
pickle_item = pickle.load(f)
sys.setrecursionlimit(old_recursion_limit)
return pickle_item
def filled_js_template_from_results_dict(results_dict, default_show="all"):
# Uses arbiter strings in the template to split the template and stick
# values in
partial_path = get_resource_dir("js_plot_dependencies")
full_path = os.path.join(partial_path, "master.zip")
url = "http://github.com/kastnerkyle/simple_template_plotter/archive/master.zip"
if not os.path.exists(full_path):
logger.info("Downloading plotter template code from %s" % url)
download(url, full_path)
zip_ref = zipfile.ZipFile(full_path, 'r')
zip_ref.extractall(partial_path)
zip_ref.close()
js_path = os.path.join(partial_path, "simple_template_plotter-master")
template_path = os.path.join(js_path, "template.html")
f = open(template_path, mode='r')
all_template_lines = f.readlines()
f.close()
imports_split_index = [n for n, l in enumerate(all_template_lines)
if "IMPORTS_SPLIT" in l][0]
data_split_index = [n for n, l in enumerate(all_template_lines)
if "DATA_SPLIT" in l][0]
log_split_index = [n for n, l in enumerate(all_template_lines)
if "LOGGING_SPLIT" in l][0]
first_part = all_template_lines[:imports_split_index]
imports_part = []
js_files_path = os.path.join(js_path, "js")
js_file_names = ["jquery-1.9.1.js", "knockout-3.0.0.js",
"highcharts.js", "exporting.js"]
js_files = [os.path.join(js_files_path, jsf) for jsf in js_file_names]
for js_file in js_files:
with open(js_file, "r") as f:
imports_part.extend(
["<script>\n"] + f.readlines() + ["</script>\n"])
post_imports_part = all_template_lines[
imports_split_index + 1:data_split_index]
log_part = all_template_lines[data_split_index + 1:log_split_index]
last_part = all_template_lines[log_split_index + 1:]
def gen_js_field_for_key_value(key, values, show=True):
assert type(values) is list
if isinstance(values[0], (np.generic, np.ndarray)):
values = [float(v.ravel()) for v in values]
maxlen = 1500
if len(values) > maxlen:
values = list(np.interp(np.linspace(0, len(values), maxlen),
np.arange(len(values)), values))
show_key = "true" if show else "false"
return "{\n name: '%s',\n data: %s,\n visible: %s\n},\n" % (
str(key), str(values), show_key)
data_part = [gen_js_field_for_key_value(k, results_dict[k], True)
if k in default_show or default_show == "all"
else gen_js_field_for_key_value(k, results_dict[k], False)
for k in sorted(results_dict.keys())]
all_filled_lines = first_part + imports_part + post_imports_part
all_filled_lines = all_filled_lines + data_part + log_part
# add logging output
tmp = copy.copy(string_f)
tmp.seek(0)
log_output = tmp.readlines()
del tmp
all_filled_lines = all_filled_lines + log_output + last_part
return all_filled_lines
def save_results_as_html(save_path, results_dict, use_resource_dir=True,
default_no_show="_auto"):
show_keys = [k for k in results_dict.keys()
if default_no_show not in k]
as_html = filled_js_template_from_results_dict(
results_dict, default_show=show_keys)
if use_resource_dir:
# Assume it ends with .py ...
script_name = get_script_name()[:-3]
save_path = os.path.join(get_resource_dir(script_name), save_path)
logger.info("Saving HTML results %s" % save_path)
with open(save_path, "w") as f:
f.writelines(as_html)
logger.info("Completed HTML results saving %s" % save_path)
@coroutine
def threaded_html_writer(maxsize=25):
"""
Expects to be sent a tuple of (save_path, results_dict)
"""
messages = Queue.PriorityQueue(maxsize=maxsize)
def run_thread():
while True:
p, item = messages.get()
if item is GeneratorExit:
return
else:
save_path, results_dict = item
save_results_as_html(save_path, results_dict)
threading.Thread(target=run_thread).start()
try:
n = 0
while True:
item = (yield)
messages.put((n, item))
n -= 1
except GeneratorExit:
messages.put((1, GeneratorExit))
def implot(arr, title="", cmap="gray", save_name=None):
import matplotlib.pyplot as plt
f, ax = plt.subplots()
ax.matshow(arr, cmap=cmap)
plt.axis("off")
def autoaspect(x_range, y_range):
"""
The aspect to make a plot square with ax.set_aspect in Matplotlib
"""
mx = max(x_range, y_range)
mn = min(x_range, y_range)
if x_range <= y_range:
return mx / float(mn)
else:
return mn / float(mx)
x1 = arr.shape[0]
y1 = arr.shape[1]
asp = autoaspect(x1, y1)
ax.set_aspect(asp)
plt.title(title)
if save_name is None:
plt.show()
else:
plt.savefig(save_name)
def _archive(tag=None):
script_name = get_script_name()[:-3]
save_path = get_resource_dir(script_name)
if tag is None:
save_script_path = os.path.join(save_path, get_script_name())
else:
save_script_path = os.path.join(save_path, tag + "_" + get_script_name())
logger.info("Saving code archive for %s" % (save_path))
script_location = os.path.abspath(sys.argv[0])
shutil.copy2(script_location, save_script_path)
lib_location = os.path.realpath(__file__)
lib_name = lib_location.split(os.sep)[-1]
if tag is None:
save_lib_path = os.path.join(save_path, lib_name)
else:
save_lib_path = os.path.join(save_path, tag + "_" + lib_name)
shutil.copy2(lib_location, save_lib_path)
class Igor(object):
"""
Runs the loop - thin wrapper for serializing
Named in reference to https://www.reddit.com/r/MachineLearning/comments/4kd1qp/good_code_to_learn_from/d3e2irr
"""
def __init__(self, loop_function, train_function, train_itr,
valid_function, valid_itr, n_epochs, checkpoint_dict,
checkpoint_delay=10,
checkpoint_every_n_epochs=1,
checkpoint_every_n_updates=np.inf,
checkpoint_every_n_seconds=np.inf,
monitor_frequency=1000,
skip_minimums=False,
skip_intermediates=True,
skip_most_recents=False):
self.loop_function = loop_function
self.train_function = train_function
self.train_itr = train_itr
self.valid_function = valid_function
self.valid_itr = valid_itr
self.n_epochs = n_epochs
self.checkpoint_dict = checkpoint_dict
# These parameters should be serialized
self.checkpoint_delay = checkpoint_delay
self.checkpoint_every_n_epochs = checkpoint_every_n_epochs
self.checkpoint_every_n_updates = checkpoint_every_n_updates
self.checkpoint_every_n_seconds = checkpoint_every_n_seconds
self.monitor_frequency = monitor_frequency
self.skip_minimums = skip_minimums
self.skip_intermediates = skip_intermediates
self.skip_most_recents = skip_most_recents
# tracker to ensure restarting at the correct minibatch
self.num_train_minibatches_run = -1
def __getstate__(self):
skiplist = [self.loop_function,
self.train_function,
self.train_itr,
self.valid_function,
self.valid_itr,
self.n_epochs,
self.checkpoint_dict]
return {k:v for k, v in self.__dict__.items() if v not in skiplist}
def refresh(self, loop_function, train_function, train_itr,
valid_function, valid_itr, n_epochs,
checkpoint_dict):
# Must refresh after reloading from pkl
self.loop_function = loop_function
self.train_function = train_function
self.train_itr = train_itr
self.valid_function = valid_function
self.valid_itr = valid_itr
self.n_epochs = n_epochs
self.checkpoint_dict = checkpoint_dict
def run(self):
run_loop(self.loop_function, self.train_function, self.train_itr,
self.valid_function, self.valid_itr, self.n_epochs,
self.checkpoint_dict,
self.checkpoint_delay,
self.checkpoint_every_n_epochs,
self.checkpoint_every_n_updates,
self.checkpoint_every_n_seconds,
self.monitor_frequency,
self.skip_minimums,
self.skip_intermediates,
self.skip_most_recents,
self.num_train_minibatches_run,
self)
def run_loop(loop_function, train_function, train_itr,
valid_function, valid_itr, n_epochs, checkpoint_dict,
checkpoint_delay=10, checkpoint_every_n_epochs=1,
checkpoint_every_n_updates=np.inf,
checkpoint_every_n_seconds=np.inf,
monitor_frequency=1000, skip_minimums=False,
skip_intermediates=True, skip_most_recents=False,
skip_n_train_minibatches=-1,
stateful_object=None):
"""
TODO: add all logging info into the js report
TODO: add upload fields to add data to an html and save a copy
loop function should return a list of costs
stateful_object allows serializing and relaunching in the middle of an epoch
for models that train for a long time
"""
logger.info("Running loop...")
# Assume keys which are theano functions to ignore!
ignore_keys = [k for k, v in checkpoint_dict.items()
if isinstance(v, theano.compile.function_module.Function)]
_loop = loop_function
ident = str(uuid.uuid4())[:8]
random_state = np.random.RandomState(2177)
monitor_prob = 1. / monitor_frequency
non_ignored_keys = [k for k in checkpoint_dict.keys()
if k not in ignore_keys]
if len(non_ignored_keys) > 0:
overall_train_costs = checkpoint_dict["train_costs"]
overall_valid_costs = checkpoint_dict["valid_costs"]
# Auto tracking times
overall_epoch_deltas = checkpoint_dict["epoch_deltas_auto"]
overall_epoch_times = checkpoint_dict["epoch_times_auto"]
overall_train_deltas = checkpoint_dict["train_deltas_auto"]
overall_train_times = checkpoint_dict["train_times_auto"]
overall_valid_deltas = checkpoint_dict["valid_deltas_auto"]
overall_valid_times = checkpoint_dict["valid_times_auto"]
overall_checkpoint_deltas = checkpoint_dict["checkpoint_deltas_auto"]
overall_checkpoint_times = checkpoint_dict["checkpoint_times_auto"]
overall_joint_deltas = checkpoint_dict["joint_deltas_auto"]
overall_joint_times = checkpoint_dict["joint_times_auto"]
overall_train_checkpoint = checkpoint_dict["train_checkpoint_auto"]
overall_valid_checkpoint = checkpoint_dict["valid_checkpoint_auto"]
keys_checked = ["train_costs",
"valid_costs",
"epoch_deltas_auto",
"epoch_times_auto",
"train_deltas_auto",
"train_times_auto",
"valid_deltas_auto",
"valid_times_auto",
"checkpoint_deltas_auto",
"checkpoint_times_auto",
"joint_deltas_auto",
"joint_times_auto",
"train_checkpoint_auto",
"valid_checkpoint_auto"]
not_handled = [k for k in checkpoint_dict.keys()
if k not in keys_checked and k not in ignore_keys]
if len(not_handled) > 0:
raise ValueError("Unhandled keys %s in checkpoint_dict, exiting..." % not_handled)
epoch_time_total = overall_epoch_times[-1]
train_time_total = overall_train_times[-1]
valid_time_total = overall_valid_times[-1]
checkpoint_time_total = overall_checkpoint_times[-1]
joint_time_total = overall_joint_times[-1]
start_epoch = len(overall_train_costs)
else:
overall_train_costs = []
overall_valid_costs = []
overall_train_checkpoint = []
overall_valid_checkpoint = []
epoch_time_total = 0
train_time_total = 0
valid_time_total = 0
checkpoint_time_total = 0
joint_time_total = 0
overall_epoch_times = []
overall_epoch_deltas = []
overall_train_times = []
overall_train_deltas = []
overall_valid_times = []
overall_valid_deltas = []
# Add zeros to avoid errors
overall_checkpoint_times = [0]
overall_checkpoint_deltas = [0]
overall_joint_times = [0]
overall_joint_deltas = [0]
start_epoch = 0
# save current state of kdllib and calling script
_archive(ident)
tcw = threaded_checkpoint_writer()
tww = threaded_weights_writer()
thw = threaded_html_writer()
best_train_checkpoint_pickle = None
best_train_checkpoint_epoch = 0
best_valid_checkpoint_pickle = None
best_valid_checkpoint_epoch = 0
# If there are more than 1M minibatches per epoch this will break!
# Not reallocating buffer greatly helps fast training models though
# Also we have bigger problems if there are 1M minibatches per epoch...
# This will get sliced down to the correct number of minibatches down below
train_costs = [0.] * 1000000
valid_costs = [0.] * 1000000
try:
for e in range(start_epoch, start_epoch + n_epochs):
joint_start = time.time()
epoch_start = time.time()
logger.info(" ")
logger.info("Starting training, epoch %i" % e)
logger.info(" ")
train_mb_count = 0
valid_mb_count = 0
results_dict = {k: v for k, v in checkpoint_dict.items()
if k not in ignore_keys}
this_results_dict = results_dict
try:
train_start = time.time()
last_time_checkpoint = train_start
while True:
if train_mb_count < skip_n_train_minibatches:
train_mb_count += 1
continue
partial_train_costs = _loop(train_function, train_itr)
train_costs[train_mb_count] = np.mean(partial_train_costs)
tc = train_costs[train_mb_count]
if np.isnan(tc):
logger.info("NaN detected in train cost, update %i" % train_mb_count)
raise StopIteration("NaN detected in train")
train_mb_count += 1
if (train_mb_count % checkpoint_every_n_updates) == 0:
checkpoint_save_path = "%s_model_update_checkpoint_%i.pkl" % (ident, train_mb_count)
weights_save_path = "%s_model_update_weights_%i.npz" % (ident, train_mb_count)
results_save_path = "%s_model_update_results_%i.html" % (ident, train_mb_count)
# Use pickle to preserve relationships between keys
# while still copying buffers
copy_pickle = pickle.dumps(checkpoint_dict)
copy_dict = pickle.loads(copy_pickle)
tcw.send((checkpoint_save_path, copy_dict))
tww.send((weights_save_path, copy_dict))
logger.info(" ")
logger.info("Update checkpoint after train mb %i" % train_mb_count)
logger.info("Current mean cost %f" % np.mean(partial_train_costs))
logger.info(" ")
this_results_dict["this_epoch_train_auto"] = train_costs[:train_mb_count]
tmb = train_costs[:train_mb_count]
running_train_mean = np.cumsum(tmb) / (np.arange(train_mb_count) + 1)
# needs to be a list
running_train_mean = list(running_train_mean)
this_results_dict["this_epoch_train_mean_auto"] = running_train_mean
thw.send((results_save_path, this_results_dict))
if stateful_object is not None:
stateful_object.num_train_minibatches_run = train_mb_count
object_save_path = "%s_model_update_object_%i.pkl" % (ident, train_mb_count)
save_checkpoint(object_save_path, stateful_object)
elif (time.time() - last_time_checkpoint) >= checkpoint_every_n_seconds:
time_diff = time.time() - train_start
last_time_checkpoint = time.time()
checkpoint_save_path = "%s_model_time_checkpoint_%i.pkl" % (ident, int(time_diff))
weights_save_path = "%s_model_time_weights_%i.npz" % (ident, int(time_diff))
results_save_path = "%s_model_time_results_%i.html" % (ident, int(time_diff))
# Use pickle to preserve relationships between keys
# while still copying buffers
copy_pickle = pickle.dumps(checkpoint_dict)
copy_dict = pickle.loads(copy_pickle)
tcw.send((checkpoint_save_path, copy_dict))
tww.send((weights_save_path, copy_dict))
logger.info(" ")
logger.info("Time checkpoint after train mb %i" % train_mb_count)
logger.info("Current mean cost %f" % np.mean(partial_train_costs))
logger.info(" ")
this_results_dict["this_epoch_train_auto"] = train_costs[:train_mb_count]
tmb = train_costs[:train_mb_count]
running_train_mean = np.cumsum(tmb) / (np.arange(train_mb_count) + 1)
# needs to be a list
running_train_mean = list(running_train_mean)
this_results_dict["this_epoch_train_mean_auto"] = running_train_mean
thw.send((results_save_path, this_results_dict))
if stateful_object is not None:
stateful_object.num_train_minibatches_run = train_mb_count
object_save_path = "%s_model_time_object_%i.pkl" % (ident, int(time_diff))
save_checkpoint(object_save_path, stateful_object)
draw = random_state.rand()
if draw < monitor_prob and not skip_intermediates:
logger.info(" ")
logger.info("Starting train mb %i" % train_mb_count)
logger.info("Current mean cost %f" % np.mean(partial_train_costs))
logger.info(" ")
results_save_path = "%s_intermediate_results.html" % ident
this_results_dict["this_epoch_train_auto"] = train_costs[:train_mb_count]
thw.send((results_save_path, this_results_dict))
except StopIteration:
# Slice so that only valid data is in the minibatch
# this also assumes there is not a variable number
# of minibatches in an epoch!
train_stop = time.time()
train_costs = train_costs[:train_mb_count]
logger.info(" ")
logger.info("Starting validation, epoch %i" % e)
logger.info(" ")
valid_start = time.time()
try:
while True:
partial_valid_costs = _loop(valid_function, valid_itr)
valid_costs[valid_mb_count] = np.mean(partial_valid_costs)
vc = valid_costs[valid_mb_count]
if np.isnan(vc):
logger.info("NaN detected in valid cost, minibatch %i" % valid_mb_count)
raise StopIteration("NaN detected in valid")
valid_mb_count += 1
draw = random_state.rand()
if draw < monitor_prob and not skip_intermediates:
logger.info(" ")
logger.info("Valid mb %i" % valid_mb_count)
logger.info("Current validation mean cost %f" % np.mean(
valid_costs))
logger.info(" ")
results_save_path = "%s_intermediate_results.html" % ident
this_results_dict["this_epoch_valid_auto"] = valid_costs[:valid_mb_count]
thw.send((results_save_path, this_results_dict))
except StopIteration:
pass
logger.info(" ")
valid_stop = time.time()
epoch_stop = time.time()
valid_costs = valid_costs[:valid_mb_count]
# Logging and tracking training statistics
epoch_time_delta = epoch_stop - epoch_start
epoch_time_total += epoch_time_delta
overall_epoch_deltas.append(epoch_time_delta)
overall_epoch_times.append(epoch_time_total)
train_time_delta = train_stop - train_start
train_time_total += train_time_delta
overall_train_deltas.append(train_time_delta)
overall_train_times.append(train_time_total)
valid_time_delta = valid_stop - valid_start
valid_time_total += valid_time_delta
overall_valid_deltas.append(valid_time_delta)
overall_valid_times.append(valid_time_total)
mean_epoch_train_cost = np.mean(train_costs)
# np.inf trick to avoid taking the min of length 0 list
old_min_train_cost = min(overall_train_costs + [np.inf])
if np.isnan(mean_epoch_train_cost):
logger.info("Previous train costs %s" % overall_train_costs[-5:])
logger.info("NaN detected in train cost, epoch %i" % e)
raise StopIteration("NaN detected in train")
overall_train_costs.append(mean_epoch_train_cost)
mean_epoch_valid_cost = np.mean(valid_costs)
old_min_valid_cost = min(overall_valid_costs + [np.inf])
if np.isnan(mean_epoch_valid_cost):
logger.info("Previous valid costs %s" % overall_valid_costs[-5:])
logger.info("NaN detected in valid cost, epoch %i" % e)
raise StopIteration("NaN detected in valid")
overall_valid_costs.append(mean_epoch_valid_cost)
if mean_epoch_train_cost < old_min_train_cost:
overall_train_checkpoint.append(mean_epoch_train_cost)
else:
overall_train_checkpoint.append(old_min_train_cost)
if mean_epoch_valid_cost < old_min_valid_cost:
overall_valid_checkpoint.append(mean_epoch_valid_cost)
else:
overall_valid_checkpoint.append(old_min_valid_cost)
checkpoint_dict["train_costs"] = overall_train_costs
checkpoint_dict["valid_costs"] = overall_valid_costs
# Auto tracking times
checkpoint_dict["epoch_deltas_auto"] = overall_epoch_deltas
checkpoint_dict["epoch_times_auto"] = overall_epoch_times
checkpoint_dict["train_deltas_auto"] = overall_train_deltas
checkpoint_dict["train_times_auto"] = overall_train_times
checkpoint_dict["valid_deltas_auto"] = overall_valid_deltas
checkpoint_dict["valid_times_auto"] = overall_valid_times
checkpoint_dict["checkpoint_deltas_auto"] = overall_checkpoint_deltas
checkpoint_dict["checkpoint_times_auto"] = overall_checkpoint_times
checkpoint_dict["joint_deltas_auto"] = overall_joint_deltas
checkpoint_dict["joint_times_auto"] = overall_joint_times
# Tracking if checkpoints are made
checkpoint_dict["train_checkpoint_auto"] = overall_train_checkpoint
checkpoint_dict["valid_checkpoint_auto"] = overall_valid_checkpoint
script = get_script_name()
hostname = socket.gethostname()
logger.info("Host %s, script %s" % (hostname, script))
logger.info("Epoch %i complete" % e)
logger.info("Epoch mean train cost %f" % mean_epoch_train_cost)
logger.info("Epoch mean valid cost %f" % mean_epoch_valid_cost)
logger.info("Previous train costs %s" % overall_train_costs[-5:])
logger.info("Previous valid costs %s" % overall_valid_costs[-5:])
results_dict = {k: v for k, v in checkpoint_dict.items()
if k not in ignore_keys}
# Checkpointing part
checkpoint_start = time.time()
if e < checkpoint_delay or skip_minimums:
pass
elif mean_epoch_valid_cost < old_min_valid_cost:
logger.info("Checkpointing valid...")
# Using dumps so relationship between keys in the pickle
# is preserved
best_valid_checkpoint_pickle = pickle.dumps(checkpoint_dict)
best_valid_checkpoint_epoch = e
if mean_epoch_train_cost < old_min_train_cost:
best_train_checkpoint_pickle = pickle.dumps(checkpoint_dict)
best_train_checkpoint_epoch = e
logger.info("Valid checkpointing complete.")
elif mean_epoch_train_cost < old_min_train_cost:
logger.info("Checkpointing train...")
best_train_checkpoint_pickle = pickle.dumps(checkpoint_dict)
best_train_checkpoint_epoch = e
logger.info("Train checkpointing complete.")
if e < checkpoint_delay:
pass
# Don't skip force checkpoints after default delay
# Printing already happens above
elif((e % checkpoint_every_n_epochs) == 0) or (e == (n_epochs - 1)):
logger.info("Checkpointing force...")
checkpoint_save_path = "%s_model_checkpoint_%i.pkl" % (ident, e)
weights_save_path = "%s_model_weights_%i.npz" % (ident, e)
results_save_path = "%s_model_results_%i.html" % (ident, e)
# Use pickle to preserve relationships between keys
# while still copying buffers
copy_pickle = pickle.dumps(checkpoint_dict)
copy_dict = pickle.loads(copy_pickle)
tcw.send((checkpoint_save_path, copy_dict))
tww.send((weights_save_path, copy_dict))
thw.send((results_save_path, results_dict))
logger.info("Force checkpointing complete.")
checkpoint_stop = time.time()
joint_stop = time.time()
if skip_most_recents:
pass
else:
# Save latest
results_save_path = "%s_most_recent_results.html" % ident
thw.send((results_save_path, results_dict))
# Will show up next go around
checkpoint_time_delta = checkpoint_stop - checkpoint_start
checkpoint_time_total += checkpoint_time_delta
overall_checkpoint_deltas.append(checkpoint_time_delta)
overall_checkpoint_times.append(checkpoint_time_total)
joint_time_delta = joint_stop - joint_start
joint_time_total += joint_time_delta
overall_joint_deltas.append(joint_time_delta)
overall_joint_times.append(joint_time_total)
except KeyboardInterrupt:
logger.info("Training loop interrupted by user! Saving current best results.")
if not skip_minimums:
# Finalize saving best train and valid
best_valid_checkpoint_dict = pickle.loads(best_valid_checkpoint_pickle)
best_valid_results_dict = {k: v for k, v in best_valid_checkpoint_dict.items()
if k not in ignore_keys}
ee = best_valid_checkpoint_epoch
checkpoint_save_path = "%s_model_checkpoint_valid_%i.pkl" % (ident, ee)
weights_save_path = "%s_model_weights_valid_%i.npz" % (ident, ee)
results_save_path = "%s_model_results_valid_%i.html" % (ident, ee)
tcw.send((checkpoint_save_path, best_valid_checkpoint_dict))
tww.send((weights_save_path, best_valid_checkpoint_dict))
thw.send((results_save_path, best_valid_results_dict))
best_train_checkpoint_dict = pickle.loads(best_train_checkpoint_pickle)
best_train_results_dict = {k: v for k, v in best_train_checkpoint_dict.items()
if k not in ignore_keys}
ee = best_train_checkpoint_epoch
checkpoint_save_path = "%s_model_checkpoint_train_%i.pkl" % (ident, ee)
weights_save_path = "%s_model_weights_train_%i.npz" % (ident, ee)
results_save_path = "%s_model_results_train_%i.html" % (ident, ee)
tcw.send((checkpoint_save_path, best_train_checkpoint_dict))
tww.send((weights_save_path, best_train_checkpoint_dict))
thw.send((results_save_path, best_train_results_dict))
logger.info("Loop finished, closing write threads (this may take a while!)")
tcw.close()
tww.close()
thw.close()
"""
end training utilities
"""
"""
start examples
"""
def run_lpc_example():
fs, X = fetch_sample_speech_tapestry()
window_size = 256
window_step = 128
a, g, e = lpc_analysis(X, order=8, window_step=window_step,
window_size=window_size, emphasis=0.9,
copy=True)
v, p = voiced_unvoiced(X, window_size=window_size,
window_step=window_step)
X_r = lpc_synthesis(a, g, e, voiced_frames=v,
emphasis=0.9, window_step=window_step)
wavfile.write("lpc_orig.wav", fs, soundsc(X))
wavfile.write("lpc_rec.wav", fs, soundsc(X_r))
def run_fft_dct_example():
# This is an example of the preproc we want to do with a lot of added noise
random_state = np.random.RandomState(1999)
fs, d, _ = fetch_sample_speech_fruit()
n_fft = 128
X = d[0]
X_stft = stft(X, n_fft)
X_rr = complex_to_real_view(X_stft).ravel()
X_dct = mdct_slow(X_rr, n_fft)
"""
X_dct_sub = X_dct[1:] - X_dct[:-1]
std = X_dct_sub.std(axis=0, keepdims=True)
X_dct_sub += .15 * std * random_state.randn(
X_dct_sub.shape[0], X_dct_sub.shape[1])
X_dct_unsub = np.cumsum(X_dct_sub, axis=0)
X_idct = imdct_slow(X_dct_unsub, n_fft).reshape(-1, n_fft)
"""
#std = X_dct.std(axis=0, keepdims=True)
#X_dct[:, 80:] = 0.
#X_dct += .8 * std * random_state.randn(
# X_dct.shape[0], X_dct.shape[1])
X_idct = imdct_slow(X_dct, n_fft).reshape(-1, n_fft)
X_irr = real_to_complex_view(X_idct)
X_r = istft(X_irr, n_fft)[:len(X)]
X_r = X_r - X_r.mean()
SNR = 20 * np.log10(np.linalg.norm(X - X_r) / np.linalg.norm(X))
print(SNR)
wavfile.write("fftdct_orig.wav", fs, soundsc(X))
wavfile.write("fftdct_rec.wav", fs, soundsc(X_r))
def run_blizzard_example():
bliz = Blizzard_dataset()
start = time.time()
itr = 1
while True:
r = bliz.next()
stop = time.time()
tot = stop - start
print("Threaded time: %s" % (tot))
print("Minibatch %s" % str(itr))
print("Time ratio (s per minibatch): %s" % (tot / float(itr)))
itr += 1
break
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
raise ValueError()
"""
end examples
"""
if __name__ == "__main__":
#run_fft_dct_example()
#run_lpc_example()
#run_blizzard_example()
#fetch_ono()
fetch_walla()
| bsd-3-clause |
Barmaley-exe/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
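# Example invocation (classifier names must be keys of ESTIMATORS above):
#   python bench_mnist.py --classifiers Nystroem-SVM ExtraTrees --n-jobs 4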
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
Ezibenroc/PyRoaringBitMap | quick_bench.py | 1 | 4894 | #! /usr/bin/env python3
import sys
import timeit
from pandas import DataFrame, Series
import random
try:
import tabulate
has_tabulate = True
except ImportError:
has_tabulate = False
sys.stderr.write('Warning: could not import tabulate\n')
sys.stderr.write(' see https://bitbucket.org/astanin/python-tabulate\n')
from pyroaring import BitMap
classes = {'set': set, 'pyroaring': BitMap}
nb_exp = 30
size = int(1e6)
density = 0.125
universe_size = int(size/density)
try:
from roaringbitmap import RoaringBitmap
classes['roaringbitmap'] = RoaringBitmap
except ImportError:
sys.stderr.write('Warning: could not import roaringbitmap\n')
sys.stderr.write(' see https://github.com/andreasvc/roaringbitmap/\n')
try:
from sortedcontainers.sortedset import SortedSet
classes['sortedcontainers'] = SortedSet
except ImportError:
sys.stderr.write('Warning: could not import sortedcontainers\n')
sys.stderr.write(' see https://github.com/grantjenks/sorted_containers\n')
try:
from croaring import BitSet
classes['python-croaring'] = BitSet
except ImportError:
sys.stderr.write('Warning: could not import croaring\n')
sys.stderr.write(' see https://github.com/sunzhaoping/python-croaring\n')
import_str = 'import array, pickle; from __main__ import %s' % (','.join(
['get_list', 'get_range', 'random', 'size', 'universe_size'] +
[cls.__name__ for cls in classes.values() if cls is not set]))
def run_exp(stmt, setup, number):
setup = '%s ; %s' % (import_str, setup)
try:
return timeit.timeit(stmt=stmt, setup=setup, number=number)/number
except Exception as e:
return float('nan')
def get_range():
r = (0, universe_size, int(1/density))
try:
return xrange(*r)
except NameError:
return range(*r)
def get_list():
return random.sample(range(universe_size), size)
constructor = 'x={class_name}(values)'
simple_setup_constructor = 'x={class_name}(get_list());val=random.randint(0, universe_size)'
double_setup_constructor = 'x={class_name}(get_list()); y={class_name}(get_list())'
equal_setup_constructor = 'l=get_list();x={class_name}(l); y={class_name}(l)'
experiments = [
# Constructors
('range constructor', ('values=get_range()', constructor)),
('ordered list constructor', ('values=get_list(); values.sort()', constructor)),
('list constructor', ('values=get_list()', constructor)),
('ordered array constructor', ('l=get_list(); l.sort(); values=array.array("I", l)', constructor)),
('array constructor', ('values=array.array("I", get_list())', constructor)),
# Simple operations
('element addition', (simple_setup_constructor, 'x.add(val)')),
('element removal', (simple_setup_constructor, 'x.discard(val)')),
('membership test', (simple_setup_constructor, 'val in x')),
# Binary operations
('union', (double_setup_constructor, 'z=x|y')),
('intersection', (double_setup_constructor, 'z=x&y')),
('difference', (double_setup_constructor, 'z=x-y')),
('symmetric difference', (double_setup_constructor, 'z=x^y')),
('equality test', (equal_setup_constructor, 'x==y')),
('subset test', (equal_setup_constructor, 'x<=y')),
# Export
('conversion to list', (simple_setup_constructor, 'list(x)')),
('pickle dump & load', (simple_setup_constructor, 'pickle.loads(pickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL))')),
('"naive" conversion to array', (simple_setup_constructor, 'array.array("I", x)')),
('"optimized" conversion to array', (simple_setup_constructor, 'x.to_array()')),
# Items
('selection', (simple_setup_constructor, 'x[int(size/2)]')),
('contiguous slice', (simple_setup_constructor, 'x[int(size/4):int(3*size/4):1]')),
('slice', (simple_setup_constructor, 'x[int(size/4):int(3*size/4):3]')),
('small slice', (simple_setup_constructor, 'x[int(size/100):int(3*size/100):3]')),
]
exp_dict = dict(experiments)
def run(cls, op):
cls_name = classes[cls].__name__
setup = exp_dict[op][0].format(class_name=cls_name)
stmt = exp_dict[op][1].format(class_name=cls_name)
result = run_exp(stmt=stmt, setup=setup, number=nb_exp)
return result
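# For example, run('pyroaring', 'union') formats the 'union' templates into
#   setup: x=BitMap(get_list()); y=BitMap(get_list())
#   stmt:  z=x|y
# and returns the mean time per execution over nb_exp runs (NaN if that
# backend raised an exception).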
def run_all():
df = DataFrame({
'operation': Series([], dtype='str'),
})
for cls in sorted(classes):
df[cls] = Series([], dtype='float')
for op, _ in experiments:
sys.stderr.write('experiment: %s\n' % op)
result = {'operation': op}
for cls in random.sample(list(classes), len(classes)):
result[cls] = run(cls, op)
df = df.append(result, ignore_index=True)
return df
if __name__ == '__main__':
df = run_all()
print()
if has_tabulate:
print(tabulate.tabulate(df, headers='keys', tablefmt='rst', showindex='never', floatfmt=".2e"))
else:
print(df)
| mit |
datapythonista/pandas | pandas/core/indexes/numeric.py | 1 | 12122 | from __future__ import annotations
from typing import (
Callable,
Hashable,
)
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_scalar,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
import pandas.core.common as com
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
_num_index_shared_docs = {}
_num_index_shared_docs[
"class_descr"
] = """
Immutable sequence used for indexing and alignment. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
See Also
--------
Index : The base pandas Index type.
Notes
-----
An Index instance can **only** contain hashable objects.
"""
class NumericIndex(Index):
"""
Provide numeric type operations.
This is an abstract class.
"""
_index_descr_args = {
"klass": "NumericIndex",
"ltype": "integer or float",
"dtype": "inferred",
"extra": "",
}
_values: np.ndarray
_default_dtype: np.dtype
_dtype_validation_metadata: tuple[Callable[..., bool], str]
_is_numeric_dtype = True
_can_hold_strings = False
@cache_readonly
def _can_hold_na(self) -> bool:
if is_float_dtype(self.dtype):
return True
else:
return False
@cache_readonly
def _engine_type(self):
return {
np.int8: libindex.Int8Engine,
np.int16: libindex.Int16Engine,
np.int32: libindex.Int32Engine,
np.int64: libindex.Int64Engine,
np.uint8: libindex.UInt8Engine,
np.uint16: libindex.UInt16Engine,
np.uint32: libindex.UInt32Engine,
np.uint64: libindex.UInt64Engine,
np.float32: libindex.Float32Engine,
np.float64: libindex.Float64Engine,
}[self.dtype.type]
@cache_readonly
def inferred_type(self) -> str:
return {
"i": "integer",
"u": "integer",
"f": "floating",
}[self.dtype.kind]
def __new__(cls, data=None, dtype: Dtype | None = None, copy=False, name=None):
name = maybe_extract_name(name, data, cls)
subarr = cls._ensure_array(data, dtype, copy)
return cls._simple_new(subarr, name=name)
@classmethod
def _ensure_array(cls, data, dtype, copy: bool):
"""
Ensure we have a valid array to pass to _simple_new.
"""
cls._validate_dtype(dtype)
if not isinstance(data, (np.ndarray, Index)):
# Coerce to ndarray if not already ndarray or Index
if is_scalar(data):
raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data, dtype=dtype)
if issubclass(data.dtype.type, str):
cls._string_data_error(data)
dtype = cls._ensure_dtype(dtype)
if copy or not is_dtype_equal(data.dtype, dtype):
subarr = np.array(data, dtype=dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if subarr.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
subarr = np.asarray(subarr)
return subarr
@classmethod
def _validate_dtype(cls, dtype: Dtype | None) -> None:
if dtype is None:
return
validation_func, expected = cls._dtype_validation_metadata
if not validation_func(dtype):
raise ValueError(
f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
)
@classmethod
def _ensure_dtype(
cls,
dtype: Dtype | None,
) -> np.dtype | None:
"""Ensure int64 dtype for Int64Index, etc. Assumed dtype is validated."""
return cls._default_dtype
def __contains__(self, key) -> bool:
"""
Check if key is a float and has a decimal. If it has, return False.
"""
if not is_integer_dtype(self.dtype):
return super().__contains__(key)
hash(key)
try:
if is_float(key) and int(key) != key:
# otherwise the `key in self._engine` check casts e.g. 1.1 -> 1
return False
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
@doc(Index.astype)
def astype(self, dtype, copy=True):
if is_float_dtype(self.dtype):
dtype = pandas_dtype(dtype)
if needs_i8_conversion(dtype):
raise TypeError(
f"Cannot convert Float64Index to dtype {dtype}; integer "
"values are required for conversion"
)
elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype):
# TODO(jreback); this can change once we have an EA Index type
# GH 13149
arr = astype_nansafe(self._values, dtype=dtype)
return Int64Index(arr, name=self.name)
return super().astype(dtype, copy=copy)
# ----------------------------------------------------------------
# Indexing Methods
@doc(Index._should_fallback_to_positional)
def _should_fallback_to_positional(self) -> bool:
return False
@doc(Index._convert_slice_indexer)
def _convert_slice_indexer(self, key: slice, kind: str):
if is_float_dtype(self.dtype):
assert kind in ["loc", "getitem"]
# We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
return super()._convert_slice_indexer(key, kind=kind)
@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@doc(Index._convert_arr_indexer)
def _convert_arr_indexer(self, keyarr) -> np.ndarray:
if not is_unsigned_integer_dtype(self.dtype):
return super()._convert_arr_indexer(keyarr)
# Cast the indexer to uint64 if possible so that the values returned
# from indexing are also uint64.
dtype = None
if is_integer_dtype(keyarr) or (
lib.infer_dtype(keyarr, skipna=False) == "integer"
):
dtype = np.dtype(np.uint64)
return com.asarray_tuplesafe(keyarr, dtype=dtype)
# ----------------------------------------------------------------
@doc(Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = lib.no_default):
if not self._can_hold_na and values.dtype.kind == "f":
name = self._name if name is lib.no_default else name
# Ensure we are not returning an Int64Index with float data:
return Float64Index._simple_new(values, name=name)
return super()._shallow_copy(values=values, name=name)
def _convert_tolerance(self, tolerance, target):
tolerance = super()._convert_tolerance(tolerance, target)
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} must contain "
"numeric elements if it is list type"
)
else:
raise ValueError(
f"tolerance argument for {type(self).__name__} must be numeric "
f"if it is a scalar: {repr(tolerance)}"
)
return tolerance
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
# If we ever have BoolIndex or ComplexIndex, this may need to be tightened
return is_numeric_dtype(dtype)
@classmethod
def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None:
"""
Ensure incoming data can be represented with matching signed-ness.
Needed if the process of casting data from some accepted dtype to the internal
dtype(s) bears the risk of truncation (e.g. float to int).
"""
if is_integer_dtype(subarr.dtype):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
@property
def _is_all_dates(self) -> bool:
"""
Checks that all the labels are datetime objects.
"""
return False
def _format_native_types(
self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
):
from pandas.io.formats.format import FloatArrayFormatter
if is_float_dtype(self.dtype):
formatter = FloatArrayFormatter(
self._values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
return super()._format_native_types(
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
**kwargs,
)
class IntegerIndex(NumericIndex):
"""
This is an abstract class for Int64Index, UInt64Index.
"""
@property
def asi8(self) -> np.ndarray:
# do not cache or you'll create a memory leak
warnings.warn(
"Index.asi8 is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
return self._values.view(self._default_dtype)
class Int64Index(IntegerIndex):
_index_descr_args = {
"klass": "Int64Index",
"ltype": "integer",
"dtype": "int64",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "int64index"
_engine_type = libindex.Int64Engine
_default_dtype = np.dtype(np.int64)
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
class UInt64Index(IntegerIndex):
_index_descr_args = {
"klass": "UInt64Index",
"ltype": "unsigned integer",
"dtype": "uint64",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "uint64index"
_engine_type = libindex.UInt64Engine
_default_dtype = np.dtype(np.uint64)
_dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer")
class Float64Index(NumericIndex):
_index_descr_args = {
"klass": "Float64Index",
"dtype": "float64",
"ltype": "float",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "float64index"
_engine_type = libindex.Float64Engine
_default_dtype = np.dtype(np.float64)
_dtype_validation_metadata = (is_float_dtype, "float")
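# Usage note: these index classes are normally produced by dtype inference
# rather than instantiated directly; in this version of pandas, for example,
#   pd.Index([1, 2, 3])   -> Int64Index([1, 2, 3], dtype='int64')
#   pd.Index([1.5, 2.5])  -> Float64Index([1.5, 2.5], dtype='float64')
# (later pandas versions deprecate these subclasses in favour of a single
# dtype-parametrized Index).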
| bsd-3-clause |
nelson-liu/scikit-learn | examples/linear_model/plot_ols.py | 104 | 1936 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
cyliustack/sofa | bin/sofa_preprocess.py | 1 | 83110 | import argparse
import csv
import glob
import itertools
import json
import multiprocessing as mp
import os
import platform
import re
import datetime
import subprocess
import sys
import warnings
from functools import partial
from operator import itemgetter
import cxxfilt
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sqlalchemy import create_engine
from sofa_config import *
from sofa_ml import hsg_v1, hsg_v2, swarms_to_sofatrace
from sofa_models import SOFATrace
from sofa_print import *
import random
sofa_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category"] # 12
def random_generate_color():
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % ( 200, 200, rand())
def list_downsample(list_in, plot_ratio):
new_list = []
for i in range(len(list_in)):
if i % plot_ratio == 0:
# print("%d"%(i))
new_list.append(list_in[i])
return new_list
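# Example (illustrative only): with plot_ratio == 2 the helper keeps every second
# element, e.g. list_downsample([10, 11, 12, 13, 14], 2) -> [10, 12, 14].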
def trace_init():
t_begin = 0
deviceId = 0
metric = 0
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
name = ''
category = 0
trace = [
t_begin,
event,
metric,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
name,
category]
return trace
def list_to_csv_and_traces(cfg, _list, csvfile, _mode):
traces = []
if len(_list[1:]) > 0:
traces = pd.DataFrame(_list[1:])
traces.columns = sofa_fieldnames
_header = True if _mode == 'w' else False
traces.to_csv( cfg.logdir + csvfile,
mode=_mode,
header=_header,
index=False,
float_format='%.6f')
else:
print_warning(cfg, 'Empty list cannot be exported to %s!' % csvfile)
return traces
# 0/0 [004] 96050.733788: 1 bus-cycles: ffffffff8106315a native_write_msr_safe
# 0/0 [004] 96050.733788: 7 cycles: ffffffff8106315a native_write_msr_safe
# 359342/359342 2493492.850125: 1 bus-cycles: ffffffff8106450a native_write_msr_safe
# 359342/359342 2493492.850128: 1 cycles: ffffffff8106450a
# native_write_msr_safe
def cpu_trace_read(sample, cfg, t_offset, cpu_mhz_xp, cpu_mhz_fp):
fields = sample.split()
event = event_raw = 0
counts = 0
    if re.match(r'\[\d+\]', fields[1]) is not None:
time = float(fields[2].split(':')[0])
func_name = '[%s]'%fields[4].replace('-','_') + fields[6] + fields[7]
counts = float(fields[3])
event_raw = 1.0 * int("0x01" + fields[5], 16)
else:
time = float(fields[1].split(':')[0])
func_name = '[%s]'%fields[3].replace('-','_') + fields[5] + fields[6]
counts = float(fields[2])
event_raw = 1.0 * int("0x01" + fields[4], 16)
if not cfg.absolute_timestamp:
time = time - cfg.time_base
t_begin = time + t_offset
t_end = time + t_offset
if len(cpu_mhz_xp) > 1:
duration = counts/(np.interp(t_begin, cpu_mhz_xp, cpu_mhz_fp)*1e6)
else:
duration = counts/(3000.0*1e6)
event = np.log10(event_raw)
if cfg.perf_events.find('cycles') == -1:
duration = np.log2(event_raw/1e14)
trace = [t_begin, # 0
event, # % 1000000 # 1
duration, # 2
-1, # 3
-1, # 4
0, # 5
0, # 6
-1, # 7
-1, # 8
int(fields[0].split('/')[0]), # 9
int(fields[0].split('/')[1]), # 10
func_name, # 11
0] # 12
return trace
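# Worked example of the duration formula above (numbers are illustrative): with
# counts == 3.0e6 cycles and an interpolated clock of 3000 MHz,
# duration = 3.0e6 / (3000.0 * 1e6) = 0.001 s, i.e. one millisecond of CPU time.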
def net_trace_read(packet, cfg, t_offset):
#21234 1562233011.469681 IP 192.168.88.88.56828 > 224.224.255.255.5400: UDP, length 851
#21235 1562233011.471352 IP 10.57.185.172.8554 > 192.168.88.88.53528: tcp 0
fields = packet.split()
time = float(fields[0])
if not cfg.absolute_timestamp:
time = time - cfg.time_base
t_begin = time + t_offset
t_end = time + t_offset
if fields[1] != 'IP':
return []
protocol = fields[5]
if protocol == 'UDP,':
payload = int(fields[7])
elif protocol == 'tcp':
payload = int(fields[6])
else:
return []
duration = float(payload / 128.0e6)
bandwidth = 128.0e6
pkt_src = 0
pkt_dst = 0
for i in range(4):
pkt_src = pkt_src + \
int(fields[2].split('.')[i]) * np.power(1000, 3 - i)
pkt_dst = pkt_dst + \
int(fields[4].split('.')[i]) * np.power(1000, 3 - i)
trace = [ t_begin,
payload * 100 + 17,
duration,
-1,
-1,
payload,
bandwidth,
pkt_src,
pkt_dst,
-1,
-1,
"network:%s:%d_to_%d_with_%d" % (protocol, pkt_src, pkt_dst, payload),
0
]
return trace
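# Worked example of the address packing above (illustrative): the IPv4 address
# 192.168.88.88 becomes 192*1000**3 + 168*1000**2 + 88*1000 + 88 = 192168088088,
# so pkt_src/pkt_dst stay numeric while each octet remains recoverable.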
def cuda_api_trace_read(
record,
cfg,
indices,
n_cudaproc,
ts_rescale,
dt_rescale,
payload_unit,
t_offset):
values = record.replace('"', '').split(',')
api_name = '[CUDA_API]' + values[indices.index('Name')]
# print("kernel name = %s" % kernel_name)
time = float(values[indices.index('Start')]) / ts_rescale + t_offset
if not cfg.absolute_timestamp:
time = time - cfg.time_base
duration = float(values[indices.index('Duration')]) / dt_rescale
t_begin = time
t_end = time + duration
payload = 0
bandwidth = 0
pid = n_cudaproc
deviceId = -1
tid = stream_id = -1
pkt_src = pkt_dst = copyKind = 0
# print("%d:%d [%s] ck:%d, %lf,%lf: %d -> %d: payload:%d, bandwidth:%lf,
# duration:%lf "%(deviceId, streamId, kernel_name, copyKind,
# t_begin,t_end, pkt_src, pkt_dst, payload, bandwidth, duration))
trace = [t_begin,
payload * 100 + 17,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
api_name,
0]
return trace
def gpu_trace_read(
record,
cfg,
indices,
n_cudaproc,
ts_rescale,
dt_rescale,
payload_unit,
t_offset):
values = record.replace('"', '').split(',')
kernel_name = values[indices.index('Name')]
time = float(values[indices.index('Start')]) / ts_rescale + t_offset
if not cfg.absolute_timestamp:
time = time - cfg.time_base
duration = float(values[indices.index('Duration')]) / dt_rescale
t_begin = time
t_end = time + duration
try:
payload = int(float(values[indices.index('Size')]) * payload_unit)
except BaseException:
payload = 0
try:
bandwidth = float(values[indices.index('Throughput')])
except BaseException:
bandwidth = 0
pid = n_cudaproc
deviceId = -1
try:
deviceId = int(float(values[indices.index('Context')]))
except BaseException:
deviceId = -1
tid = stream_id = -1
try:
tid = streamId = int(float(values[indices.index('Stream')]))
except BaseException:
tid = streamId = -1
pkt_src = pkt_dst = copyKind = 0
if kernel_name.find('HtoD') != -1:
copyKind = 1
pkt_src = 0
pkt_dst = deviceId
kernel_name = "CUDA_COPY_H2D_%dB" % (payload)
elif kernel_name.find('DtoH') != -1:
copyKind = 2
pkt_src = deviceId
pkt_dst = 0
kernel_name = "CUDA_COPY_D2H_%dB" % (payload)
elif kernel_name.find('DtoD') != -1:
copyKind = 8
pkt_src = deviceId
pkt_dst = deviceId
kernel_name = "CUDA_COPY_D2D_%dB" % (payload)
elif kernel_name.find('PtoP') != -1:
copyKind = 10
try:
pkt_src = int(values[indices.index('Src Ctx')])
except BaseException:
pkt_src = 0
try:
pkt_dst = int(values[indices.index('Dst Ctx')])
except BaseException:
pkt_dst = 0
kernel_name = "[CUDA_COPY_P2P]from_gpu%d_to_gpu%d_%dB" % (pkt_src, pkt_dst, payload)
else:
copyKind = 0
if deviceId != -1:
kernel_name = '[gpu%d]'%deviceId + kernel_name
trace = [t_begin,
payload * 100 + 17,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
kernel_name,
0]
return trace
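# Summary of the copyKind codes assigned above: 1 = host-to-device copy,
# 2 = device-to-host copy, 8 = device-to-device copy, 10 = peer-to-peer copy,
# and 0 = kernels or any other GPU activity.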
def traces_to_json(traces, path, cfg):
if len(traces) == 0:
print_warning(cfg,"Empty traces!")
return
with open(path, 'w') as f:
for trace in traces:
if len(trace.data) > 0:
f.write(trace.name + " = ")
trace.data.rename(
columns={
trace.x_field: 'x',
trace.y_field: 'y'},
inplace=True)
sofa_series = {
"name": trace.title,
"color": trace.color,
"data": json.loads(
trace.data.to_json(
orient='records'))}
json.dump(sofa_series, f)
trace.data.rename(
columns={
'x': trace.x_field,
'y': trace.y_field},
inplace=True)
f.write("\n\n")
f.write("sofa_traces = [ ")
for trace in traces:
if len(trace.data) > 0:
f.write(trace.name + ",")
f.write(" ]")
def sofa_preprocess(cfg):
print_main_progress('SOFA preprocessing...')
cfg.time_base = 0
t_glb_gpu_base = 0
logdir = cfg.logdir
with_sudo = ''
if int(os.system('command -v sudo 1> /dev/null')) == 0:
with_sudo = 'sudo '
with open(logdir + 'misc.txt', 'r') as f:
lines = f.readlines()
if len(lines) == 4:
cfg.pid = int(lines[3].split()[1])
else:
print_warning(cfg,'Incorrect misc.txt content. Some profiling information may not be available.')
cid = ''
container_pid = ''
if os.path.isfile(cfg.logdir+'/cidfile.txt'):
with open(cfg.logdir+'/cidfile.txt') as cidfile:
cid = cidfile.readlines()[0]
subprocess.call(with_sudo + 'umount %s/container_root ' % (cfg.logdir), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.call('rm -rf %s/container_root ' % (cfg.logdir), shell=True )
subprocess.call('mkdir -p %s/container_root ' % (cfg.logdir), shell=True )
subprocess.call(with_sudo + 'bindfs /proc/$(docker inspect --format {{.State.Pid}} %s)/root %s/container_root ' % (cid, cfg.logdir), shell=True )
subprocess.call(with_sudo + 'chown $(whoami) %s/*.nvvp ' % (cfg.logdir), shell=True)
if int(os.system('command -v perf 1> /dev/null')) == 0:
with open(logdir + 'perf.script', 'w') as logfile:
if os.path.isfile('%s/container_root' % logdir):
option_container_symbols = ' --symfs %s/container_root ' % logdir
else:
option_container_symbols = ''
option_kernel_symbols = ' --kallsym %s/kallsyms ' % logdir
subprocess.call('perf script -F time,pid,tid,event,ip,sym,dso,symoff,period,brstack,brstacksym -i %s/perf.data %s %s'
% (logdir, option_kernel_symbols, option_container_symbols),
shell=True, stdout=logfile, stderr=subprocess.DEVNULL)
with open(logdir + 'sofa_time.txt') as f:
lines = f.readlines()
cfg.time_base = float(lines[0]) + cfg.cpu_time_offset
print_info(cfg,'Time offset applied to timestamp (s):' + str(cfg.cpu_time_offset))
print_info(cfg,'SOFA global time base (s):' + str(cfg.time_base))
cpu_mhz_xp = [0.0]
cpu_mhz_fp = [3000.0]
try:
with open(logdir + 'cpuinfo.txt') as f:
lines = f.readlines()
for line in lines:
fields = line.split()
timestamp = float(fields[0])
mhz = float(fields[1])
cpu_mhz_xp.append(timestamp)
cpu_mhz_fp.append(mhz)
except:
        print_warning(cfg,'no cpuinfo file found; default CPU MHz = %lf'%(cpu_mhz_fp[0]))
net_traces = []
cpu_traces = []
cpu_traces_viz = []
blk_d_traces = []
blk_traces = []
vm_usr_traces = []
vm_sys_traces = []
vm_bi_traces = []
vm_bo_traces = []
vm_in_traces = []
vm_cs_traces = []
vm_wa_traces = []
vm_st_traces = []
mpstat_traces = []
diskstat_traces = []
tx_traces = []
rx_traces = []
strace_traces = []
pystacks_traces = []
nvsmi_sm_traces = []
nvsmi_mem_traces = []
nvsmi_enc_traces = []
nvsmi_dec_traces = []
gpu_traces = []
gpu_traces_viz = []
gpu_api_traces = []
gpu_api_traces_viz = []
gpu_kernel_traces = []
gpu_memcpy_traces = []
gpu_memcpy2_traces = []
gpu_memcpy_h2d_traces = []
gpu_memcpy_d2h_traces = []
gpu_memcpy_d2d_traces = []
gpu_glb_kernel_traces = []
gpu_glb_memcpy_traces = []
gpu_glb_memcpy2_traces = []
gpu_glb_memcpy_h2d_traces = []
gpu_glb_memcpy_d2h_traces = []
gpu_glb_memcpy_d2d_traces = []
gpulog_mode = 'w'
gpulog_header = 'True'
cpu_count = mp.cpu_count()
if os.path.isfile('%s/mpstat.txt' % logdir):
with open('%s/mpstat.txt' % logdir) as f:
mpstat = np.genfromtxt(logdir+'/mpstat.txt', delimiter=',', invalid_raise=False )
header = mpstat[0]
mpstat = mpstat[1:]
mpstat_list = []
mpstat_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
n_cores = int(mpstat[:,1].max() + 1)
stride = n_cores + 1
usr_sys_list = []
for i in range(len(mpstat)):
if len(mpstat[i]) < len(header):
continue
if i <= stride:
continue
#time, cpu, user,nice, system, idle, iowait, irq, softirq
core = mpstat[i,1]
d_mp = mpstat[i,:] - mpstat[i-stride,:]
d_mp_total = d_mp[2] + d_mp[4] + d_mp[5] + d_mp[6] + d_mp[7]
if d_mp_total == 0 :
print_info(cfg, 'No increases in mpstat values')
continue
d_mp_usr = d_mp[2] * 100 / float(d_mp_total)
d_mp_sys = d_mp[4] * 100 / float(d_mp_total)
d_mp_idl = d_mp[5] * 100 / float(d_mp_total)
d_mp_iow = d_mp[6] * 100 / float(d_mp_total)
d_mp_irq = d_mp[7] * 100 / float(d_mp_total)
cpu_time = (d_mp_total - d_mp[5]) * 0.01
t_begin = mpstat[i,0]
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
if mpstat[i,1] != -1:
deviceId = core
metric = cpu_time
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
mpstat_info = 'mpstat_core%d (usr|sys|idl|iow|irq): |%3d|%3d|%3d|%3d|%3d|' % (core, d_mp_usr, d_mp_sys, d_mp_idl, d_mp_iow, d_mp_irq)
trace_usr = [
t_begin,
event,
metric,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
mpstat_info,
0]
mpstat_list.append(trace_usr)
else:
usr_sys_list.append(tuple([t_begin, d_mp_usr, d_mp_sys]))
usr_sys_table = pd.DataFrame(usr_sys_list, columns=['time', 'usr', 'sys'])
usr_sys_table.to_csv('%s/usr_sys.csv' %logdir, index=False)
mpstat_traces = list_to_csv_and_traces(cfg, mpstat_list, 'mpstat.csv', 'w')
if os.path.isfile('%s/diskstat.txt' % logdir):
with open('%s/diskstat.txt' % logdir) as f:
diskstats = f.readlines()
diskstat_list = []
diskstat_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
all_diskstat_vector = []
tmp_list = []
for diskstat in diskstats:
m = diskstat[:-1]
m = m.split(',')
tmp_list.append(m)
devs = list(set(map(lambda x: x[1], tmp_list)))
n_dev = len(devs)
for i in range(len(diskstats)):
if i < n_dev:
continue
m = diskstats[i][:-1]
m = m.split(',')
dev = m[1]
m_last = diskstats[i-n_dev][:-1]
m_last = m_last.split(',')
# get sector size
secsize = 512
try:
f = open('/sys/block/'+dev+'/queue/hw_sector_size')
s = f.readline()
s = re.match("\d+", s)
secsize = int(s.group())
except:
pass
d_read = int(m[2]) - int(m_last[2])
d_read *= secsize
d_write = int(m[3]) - int(m_last[3])
d_write *= secsize
d_disk_total = d_read + d_write #total bytes
t_begin = float(m[0])
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
d_duration = float(m[0]) - float(m_last[0])
# MB/s
d_throughput = round((d_disk_total/d_duration)/float(1024 ** 2),2)
read_bandwidth = d_read / d_duration
write_bandwidth = d_write / d_duration
total_bandwidth = d_disk_total / d_duration
# iops
r_io = int(m[4]) - int(m_last[4])
w_io = int(m[5]) - int(m_last[5])
r_iops = int(r_io / d_duration)
w_iops = int(w_io / d_duration)
iops = int((r_io + w_io) / d_duration)
# await time per io
r_time, w_time, await_time = 0, 0, 0
if r_io > 0:
r_time = (int(m[6]) - int(m_last[6])) / r_io
if w_io > 0:
w_time = (int(m[7]) - int(m_last[7])) / w_io
if (r_io+w_io) > 0:
await_time = ((int(m[6]) - int(m_last[6])) + (int(m[7]) - int(m_last[7]))) / (r_io + w_io)
diskstat_vector = [t_begin,
m[1],
r_iops,
w_iops,
iops,
r_time,
w_time,
await_time,
read_bandwidth,
write_bandwidth,
total_bandwidth
]
all_diskstat_vector.append(tuple(diskstat_vector))
event = -1
duration = d_duration
deviceId = m[1]
copyKind = -1
payload = d_disk_total
bandwidth = d_throughput
pkt_src = -1
pkt_dst = -1
pid = -1
tid = -1
diskstat_info = 'diskstat_dev:%s (read|write): |%3d|%3d| bytes' % (m[1], d_read, d_write)
trace = [
t_begin,
event,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
diskstat_info,
0]
diskstat_list.append(trace)
diskstat_traces = pd.DataFrame(diskstat_list[1:], columns = sofa_fieldnames)
diskstat_table = pd.DataFrame(all_diskstat_vector, columns = ['time', 'dev', 'r_iops', 'w_iops', 'iops',
'r_time', 'w_time', 'await_time', 'd_read', 'd_write', 'd_disk_total'])
for dev in devs:
device = (diskstat_table['dev'] == dev)
if diskstat_table[device]['d_disk_total'].max() == 0:
diskstat_table = diskstat_table[(diskstat_table.dev != dev)]
diskstat_traces = diskstat_traces[(diskstat_traces.deviceId != dev)]
diskstat_traces.to_csv('%s/diskstat.csv' %logdir, index=False)
diskstat_table.to_csv('%s/diskstat_vector.csv' %logdir, index=False)
if cfg.diskstat_filters:
print(cfg.diskstat_filters)
de = (diskstat_table['dev'] == cfg.diskstat_filters)
diskstat_table = diskstat_table[de]
diskstat_table.to_csv('%s/diskstat_vector_ui.csv' %logdir, index=False)
# dev cpu sequence timestamp pid event operation start_block+number_of_blocks process
# <mjr,mnr> number
# 8,0 6 1 0.000000000 31479 A W 691248304 + 1024 <- (8,5) 188175536
# 8,0 6 2 0.000001254 31479 Q W 691248304 + 1024 [dd]
# 8,0 6 3 0.000003353 31479 G W 691248304 + 1024 [dd]
# 8,0 6 4 0.000005004 31479 I W 691248304 + 1024 [dd]
# 8,0 6 5 0.000006175 31479 D W 691248304 + 1024 [dd]
# 8,0 2 1 0.001041752 0 C W 691248304 + 1024 [0]
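    # In the parsing below, 'D' (dispatch) events are cached per starting block and
    # later matched with their 'C' (complete) events; the reported latency is the
    # C - D timestamp difference for the same starting block.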
if os.path.isfile('%s/blktrace.out' % logdir):
subprocess.call('btt -i %s/blktrace.out -B offset > %s/btt.txt' %(logdir, logdir), shell=True)
subprocess.call('mv offset*c.dat %s/offset_all.txt' % logdir, shell=True)
if os.path.isfile('%s/blktrace.txt' % logdir):
subprocess.call("sudo chown $(id -un) %s/blktrace.txt" % logdir, shell=True)
with open('%s/blktrace.txt' % logdir) as f:
lines = f.readlines()
print_info(cfg,"Length of blktrace = %d" % len(lines))
if len(lines) > 0:
blktrace_d_dict = {}
blktrace_d_list = []
blktrace_list = []
blktrace_d_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
blktrace_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
record_error_flag = 0
t = 0
for i in range(len(lines)):
# filter some total calculate information in the below of blktrace.txt file
if len(lines[i]) > 50 and "Read" not in lines[i] and "CPU" not in lines[i] and "IO unplugs" not in lines[i]:
fields = lines[i].split()
blktrace_dev = fields[0]
blktrace_cpu = fields[1]
blktrace_sequence_number = fields[2]
blktrace_timestamp = float(fields[3])
blktrace_pid = fields[4]
blktrace_event = fields[5]
blktrace_operation = fields[6]
try:
blktrace_start_block = int(fields[7])
except:
blktrace_start_block = 0
record_error_flag = 1
pass
# the two column blktrace_block_size and blktrace_process is for future used
if len(fields) > 10:
blktrace_block_size = fields[9]
blktrace_process = fields[10]
t_begin = blktrace_timestamp
deviceId = cpuid = blktrace_cpu
event = blktrace_event
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = blktrace_pid
name_info = 'starting_block='+str(blktrace_start_block)
trace = [
t_begin,
event,
blktrace_start_block,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
name_info,
cpuid]
                    if event == 'D':
blktrace_d_list.append(trace)
blktrace_d_dict[blktrace_start_block] = trace
                    if event == 'C':
try:
d_event = blktrace_d_dict[blktrace_start_block]
except:
continue
time_consume = float(blktrace_timestamp - d_event[0])
name_info = 'latency=%0.6f' % float(time_consume)
trace = [
d_event[0],
event,
float(time_consume),
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
name_info,
cpuid]
blktrace_list.append(trace)
blk_d_traces = list_to_csv_and_traces(
cfg, blktrace_d_list, 'blktrace.csv', 'w')
blk_traces = list_to_csv_and_traces(
cfg, blktrace_list, 'blktrace.csv', 'a')
if record_error_flag == 1 :
                print_warning(cfg,'blktrace may have failed to record!')
# procs -----------------------memory---------------------- ---swap-- -
# r b swpd free buff cache si so bi bo in cs us sy id wa st
# 2 0 0 400091552 936896 386150912 0 0 3 18 0 1 5 0 95 0 0
# ============ Preprocessing VMSTAT Trace ==========================
if os.path.isfile('%s/vmstat.txt' % logdir):
with open('%s/vmstat.txt' % logdir) as f:
lines = f.readlines()
print_info(cfg,"Length of vmstat_traces = %d" % len(lines))
if len(lines) > 0:
vm_usr_list = []
vm_sys_list = []
vm_bi_list = []
vm_bo_list = []
vm_in_list = []
vm_cs_list = []
vm_wa_list = []
vm_st_list = []
vm_usr_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_sys_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_bi_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_bo_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_in_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_cs_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_wa_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
vm_st_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
t = 0
for i in range(len(lines)):
                if lines[i].find('procs') == -1 and lines[i].find('swpd') == -1:
fields = lines[i].split()
if len(fields) < 17:
continue
vm_r = float(fields[0]) + 1e-5
vm_b = float(fields[1]) + 1e-5
vm_sw = float(fields[2]) + 1e-5
vm_fr = float(fields[3]) + 1e-5
vm_bu = float(fields[4]) + 1e-5
vm_ca = float(fields[5]) + 1e-5
vm_si = float(fields[6]) + 1e-5
vm_so = float(fields[7]) + 1e-5
vm_bi = float(fields[8]) + 1e-5
vm_bo = float(fields[9]) + 1e-5
vm_in = float(fields[10]) + 1e-5
vm_cs = float(fields[11]) + 1e-5
vm_usr = float(fields[12]) + 1e-5
vm_sys = float(fields[13]) + 1e-5
vm_idl = float(fields[14]) + 1e-5
vm_wa = float(fields[15]) + 1e-5
vm_st = float(fields[16]) + 1e-5
if cfg.absolute_timestamp:
t_begin = t + cfg.time_base
else:
t_begin = t
deviceId = cpuid = -1
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
vmstat_info = 'r=' + str(int(vm_r)) + '|'\
+ 'b=' + str(int(vm_b)) + '|'\
+ 'sw=' + str(int(vm_sw)) + '|'\
+ 'fr=' + str(int(vm_fr)) + '|'\
+ 'bu=' + str(int(vm_bu)) + '|'\
+ 'ca=' + str(int(vm_ca)) + '|'\
+ 'si=' + str(int(vm_si)) + '|'\
+ 'so=' + str(int(vm_so)) + '|'\
+ 'bi=' + str(int(vm_bi)) + '|'\
+ 'bo=' + str(int(vm_bo)) + '|'\
+ 'in=' + str(int(vm_in)) + '|'\
+ 'cs=' + str(int(vm_cs)) + '|'\
+ 'usr=' + str(int(vm_usr)) + '|'\
+ 'sys=' + str(int(vm_sys)) + '|'\
+ 'idl=' + str(int(vm_idl)) + '|'\
+ 'wa=' + str(int(vm_wa)) + '|'\
+ 'st=' + str(int(vm_st))
trace = [
t_begin,
event,
vm_bi,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_bi_list.append(trace)
trace = [
t_begin,
event,
vm_bo,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_bo_list.append(trace)
trace = [
t_begin,
event,
vm_in,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_in_list.append(trace)
trace = [
t_begin,
event,
vm_cs,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_cs_list.append(trace)
trace = [
t_begin,
event,
vm_wa,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_wa_list.append(trace)
trace = [
t_begin,
event,
vm_st,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_st_list.append(trace)
trace = [
t_begin,
event,
vm_usr,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_usr_list.append(trace)
trace = [
t_begin,
event,
vm_sys,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
vmstat_info,
cpuid]
vm_sys_list.append(trace)
t = t + 1
vm_bi_traces = list_to_csv_and_traces(
cfg, vm_bi_list, 'vmstat.csv', 'w')
vm_bo_traces = list_to_csv_and_traces(
cfg, vm_bo_list, 'vmstat.csv', 'a')
vm_in_traces = list_to_csv_and_traces(
cfg, vm_in_list, 'vmstat.csv', 'a')
vm_cs_traces = list_to_csv_and_traces(
cfg, vm_cs_list, 'vmstat.csv', 'a')
vm_wa_traces = list_to_csv_and_traces(
cfg, vm_wa_list, 'vmstat.csv', 'a')
vm_st_traces = list_to_csv_and_traces(
cfg, vm_st_list, 'vmstat.csv', 'a')
vm_usr_traces = list_to_csv_and_traces(
cfg, vm_usr_list, 'vmstat.csv', 'a')
vm_sys_traces = list_to_csv_and_traces(
cfg, vm_sys_list, 'vmstat.csv', 'a')
# timestamp, name, index, utilization.gpu [%], utilization.memory [%]
# 2019/05/16 16:49:04.650, GeForce 940MX, 0, 0 %, 0 %
if os.path.isfile('%s/nvsmi_query.txt' % logdir):
with open('%s/nvsmi_query.txt' % logdir) as f:
next(f)
lines = f.readlines()
nvsmi_query_has_data = True
for line in lines:
if line.find('failed') != -1 or line.find('Failed') != -1:
nvsmi_query_has_data = False
print_warning(cfg,'No nvsmi query data.')
break
if nvsmi_query_has_data:
print_info(cfg,"Length of nvsmi_query_traces = %d" % len(lines))
nvsmi_sm_list = []
nvsmi_mem_list = []
nvsmi_sm_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
nvsmi_mem_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
for i in range(len(lines)):
fields = lines[i].split(',')
nv_time = fields[0]
nv_time = datetime.datetime.strptime(nv_time, '%Y/%m/%d %H:%M:%S.%f').timestamp() + cfg.nvsmi_time_zone * 3600
nvsmi_id = int(fields[2])
nvsmi_sm = int(fields[3][:-2])
nvsmi_mem = int(fields[4][:-2])
# nvtime
t_begin = nv_time
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
deviceId = cpuid = nvsmi_id
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
sm_info = "GPUID_sm=%d_%d" % (nvsmi_id, nvsmi_sm)
mem_info = "GPUID_mem=%d_%d" % (nvsmi_id, nvsmi_mem)
trace = [
t_begin,
0,
nvsmi_sm,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
sm_info,
cpuid]
nvsmi_sm_list.append(trace)
trace = [
t_begin,
1,
nvsmi_mem,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
mem_info,
cpuid]
nvsmi_mem_list.append(trace)
if len(nvsmi_sm_list)>1:
nvsmi_sm_traces = list_to_csv_and_traces(cfg, nvsmi_sm_list, 'nvsmi_trace.csv', 'w')
nvsmi_mem_traces = list_to_csv_and_traces(cfg, nvsmi_mem_list, 'nvsmi_trace.csv', 'a')
# gpu sm mem enc dec
# Idx % % % %
# 0 0 0 0 0
# 1 0 0 0 0
# 2 0 0 0 0
if os.path.isfile('%s/nvsmi.txt' % logdir):
with open('%s/nvsmi.txt' % logdir) as f:
lines = f.readlines()
nvsmi_has_data = True
for line in lines:
if line.find('failed') != -1 or line.find('Failed') != -1:
nvsmi_has_data = False
print_warning(cfg,'No nvsmi data.')
break
if nvsmi_has_data:
print_info(cfg,"Length of nvsmi_traces = %d" % len(lines))
nvsmi_enc_list = []
nvsmi_dec_list = []
nvsmi_enc_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
nvsmi_dec_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
t = 0
for i in range(len(lines)):
if lines[i].find('gpu') == -1 and lines[i].find('Idx') == -1:
fields = lines[i].split()
if len(fields) < 5:
continue
nvsmi_id = int(fields[0])
if fields[3] == '-':
nvsmi_enc = int(0)
else:
nvsmi_enc = int(fields[3])
if fields[4] == '-':
nvsmi_dec = int(0)
else:
nvsmi_dec = int(fields[4])
if cfg.absolute_timestamp:
t_begin = t + cfg.time_base
else:
t_begin = t
deviceId = cpuid = nvsmi_id
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
enc_info = "GPUID_enc=%d_%d" % (nvsmi_id, nvsmi_enc)
dec_info = "GPUID_dec=%d_%d" % (nvsmi_id, nvsmi_dec)
trace = [
t_begin,
2,
nvsmi_enc,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
enc_info,
cpuid]
if t > 3 :
nvsmi_enc_list.append(trace)
trace = [
t_begin,
3,
nvsmi_dec,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
dec_info,
cpuid]
if t > 3 :
nvsmi_dec_list.append(trace)
if nvsmi_id == 0:
t = t + 1
if len(nvsmi_enc_list)>1:
cfg.nvsmi_data = True
nvsmi_enc_traces = list_to_csv_and_traces(cfg, nvsmi_enc_list, 'nvsmi_trace.csv', 'a')
nvsmi_dec_traces = list_to_csv_and_traces(cfg, nvsmi_dec_list, 'nvsmi_trace.csv', 'a')
else:
print_warning(cfg,"Program exectution time is fewer than 3 seconds, so nvsmi trace analysis will not be displayed.")
# ============ Preprocessing Network Trace ==========================
if os.path.isfile('%s/sofa.pcap' % logdir):
with open(logdir + 'net.tmp', 'w') as f:
subprocess.check_call(
["tcpdump", "-q", "-n", "-tt", "-r",
"%s/sofa.pcap"%logdir ], stdout=f, stderr=subprocess.DEVNULL)
with open(logdir + 'net.tmp') as f:
packets = lines = f.readlines()
print_info(cfg,"Length of net_traces = %d" % len(packets))
if packets:
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
net_trace_read,
cfg=cfg,
t_offset=0),
packets)
res_viz = list_downsample(res, cfg.plot_ratio)
net_traces = pd.DataFrame(res_viz)
net_traces.columns = sofa_fieldnames
net_traces.to_csv(
logdir + 'nettrace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
# ============ Apply for Network filter =====================
if cfg.net_filters:
filtered_net_groups = []
packet_not_zero = net_traces['payload'] > 0
start = (net_traces['pkt_src'] == float(cfg.net_filters[0]))
for filter in cfg.net_filters[1:]:
end = (net_traces['pkt_dst'] == float(filter))
group = net_traces[packet_not_zero & start & end]
filtered_net_groups.append({'group': group,
'color': 'rgba(%s,%s,%s,0.8)' %(random.randint(0,255),random.randint(0,255),random.randint(0,255)),
'keyword': 'to_%s' %filter})
end = (net_traces['pkt_dst'] == float(cfg.net_filters[0]))
for filter in cfg.net_filters[1:]:
start = (net_traces['pkt_src'] == float(filter))
group = net_traces[packet_not_zero & start & end]
filtered_net_groups.append({'group': group,
'color': 'rgba(%s,%s,%s,0.8)' %(random.randint(0,255),random.randint(0,255),random.randint(0,255)),
'keyword': 'from_%s' %filter})
else:
print_warning(cfg,"no network traces were recorded.")
# ============ Preprocessing Network Bandwidth Trace ============
if os.path.isfile('%s/netstat.txt' % logdir):
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
if lines:
tmp_time = float(lines[0].split(',')[0])
tmp_tx = int(lines[0].split(',')[1])
tmp_rx = int(lines[0].split(',')[2])
all_time = []
all_tx = []
all_rx = []
tx_list = []
rx_list = []
bandwidth_result = pd.DataFrame([], columns=['time', 'tx_bandwidth', 'rx_bandwidth'])
for line in lines[1:]:
time = float(line.split(',')[0])
tx = int(line.split(',')[1])
rx = int(line.split(',')[2])
tx_bandwidth = (tx - tmp_tx) / (time - tmp_time)
rx_bandwidth = (rx - tmp_rx) / (time - tmp_time)
#sofa_fieldnames = [
# "timestamp", # 0
# "event", # 1
# "duration", # 2
# "deviceId", # 3
# "copyKind", # 4
# "payload", # 5
# "bandwidth", # 6
# "pkt_src", # 7
# "pkt_dst", # 8
# "pid", # 9
# "tid", # 10
# "name", # 11
# "category"] # 12
t_begin = time
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
trace = [
t_begin, # timestamp
0, # event
-1,
-1,
-1,
-1,
tx_bandwidth, # tx bandwidth
-1,
-1,
-1,
-1,
"network_bandwidth_tx(bytes):%d" % tx_bandwidth,
0
]
tx_list.append(trace)
trace = [
t_begin, # timestamp
1, # event
-1,
-1,
-1,
-1,
rx_bandwidth, # rx bandwidth
-1,
-1,
-1,
-1,
"network_bandwidth_rx(bytes):%d" % rx_bandwidth,
0
]
rx_list.append(trace)
# for visualize
all_time.append(time)
all_tx.append(tx_bandwidth)
all_rx.append(rx_bandwidth)
# for pandas
result = [t_begin, tx_bandwidth, rx_bandwidth]
tmp_bandwidth_result = pd.DataFrame([result], columns=['time', 'tx_bandwidth', 'rx_bandwidth'])
bandwidth_result = pd.concat([bandwidth_result, tmp_bandwidth_result], ignore_index=True)
# prepare for next round loop
tmp_time = time
tmp_tx = tx
tmp_rx = rx
bandwidth_result.to_csv('%s/netbandwidth.csv' %logdir, header=True, index=False)
tx_traces = pd.DataFrame(tx_list, columns = sofa_fieldnames)
tx_traces.to_csv(
logdir + 'netstat.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
rx_traces = pd.DataFrame(rx_list, columns = sofa_fieldnames)
rx_traces.to_csv(
logdir + 'netstat.csv',
mode='a',
header=False,
index=False,
float_format='%.6f')
# ============ Preprocessing GPU Trace ==========================
num_cudaproc = 0
filtered_gpu_groups = []
indices = []
for nvvp_filename in glob.glob(logdir + "gputrace*[0-9].nvvp"):
print_progress(cfg,"Read " + nvvp_filename + " by nvprof -- begin")
if nvvp_filename.find('001001') != -1:
nvvp_filename_base = nvvp_filename.split('/')[-1]
subprocess.call('nvidia-docker exec %s /usr/local/cuda/bin/nvprof --csv --print-gpu-trace -i /sofalog/%s --log-file /sofalog/gputrace.tmp' % (cid, nvvp_filename_base), shell=True)
subprocess.call(with_sudo + 'chown $(whoami) %s/gputrace.tmp ' % (cfg.logdir), shell=True)
else:
with open(logdir + "gputrace.tmp", "w") as f:
subprocess.call(["nvprof", "--csv", "--print-gpu-trace", "-i", nvvp_filename], stderr=f)
#Automatically retrieve the timestamp of the first CUDA activity(e.g. kernel, memory op, etc..)
engine = create_engine("sqlite:///"+nvvp_filename)
t_glb_gpu_bases = []
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_MEMSET',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO MEMSET')
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_MEMCPY',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO MEMCPY')
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO CONCURRENT KERNEL')
try:
t_glb_gpu_bases.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_KERNEL',engine)).iloc[0]['start'])
except BaseException:
print_info(cfg,'NO KERNEL')
if len(t_glb_gpu_bases) > 0:
t_glb_gpu_base = sorted(t_glb_gpu_bases)[0]*1.0/1e+9
else:
print_warning(cfg,"There is no data in tables of NVVP file.")
print_info(cfg,"Timestamp of the first GPU trace = " + str(t_glb_gpu_base))
print_progress(cfg,"Read " + nvvp_filename + " by nvprof -- end")
num_cudaproc = num_cudaproc + 1
with open(logdir + 'gputrace.tmp') as f:
records = f.readlines()
if len(records) > 0 and records[1].split(',')[0] == '"Start"':
indices = records[1].replace(
'"', '').replace(
'\n', '').split(',')
# ms,ms,,,,,,,,B,B,MB,GB/s,,,,
payload_unit = 1
if records[2].split(',')[11] == 'GB':
payload_unit = np.power(1024,3)
elif records[2].split(',')[11] == 'MB':
payload_unit = np.power(1024,2)
elif records[2].split(',')[11] == 'KB':
payload_unit = np.power(1024,1)
elif records[2].split(',')[11] == 'B':
payload_unit = 1
else:
print_info(cfg,"The payload unit in gputrace.tmp was not recognized!")
sys.exit(1)
ts_rescale = 1.0
if records[2].split(',')[0] == 'ms':
ts_rescale = 1.0e3
elif records[2].split(',')[0] == 'us':
ts_rescale = 1.0e6
dt_rescale = 1.0
if records[2].split(',')[1] == 'ms':
dt_rescale = 1.0e3
elif records[2].split(',')[1] == 'us':
dt_rescale = 1.0e6
records = records[3:]
print_info(cfg,"Length of gpu_traces = %d" % len(records))
t_base = float(records[0].split(',')[0])
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
gpu_trace_read,
cfg=cfg,
indices=indices,
ts_rescale=ts_rescale,
dt_rescale=dt_rescale,
payload_unit=payload_unit,
n_cudaproc=num_cudaproc,
t_offset=t_glb_gpu_base -
t_base),
records)
gpu_traces = pd.DataFrame(res)
gpu_traces.columns = sofa_fieldnames
res_viz = list_downsample(res, cfg.plot_ratio)
gpu_traces_viz = pd.DataFrame(res_viz)
gpu_traces_viz.columns = sofa_fieldnames
gpu_traces.to_csv(
logdir + 'gputrace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
# Apply filters for GPU traces
df_grouped = gpu_traces.groupby('name')
for filter in cfg.gpu_filters:
group = gpu_traces[gpu_traces['name'].str.contains(
filter.keyword)]
filtered_gpu_groups.append({'group': group, 'color': filter.color,
'keyword': filter.keyword})
else:
print_warning(cfg,
"gputrace existed, but no kernel traces were recorded.")
os.system('cat %s/gputrace.tmp' % logdir)
# ============ Preprocessing GPU API Trace ==========================
if cfg.cuda_api_tracing:
num_cudaproc = 0
indices = []
for nvvp_filename in glob.glob(logdir + "gputrace*[0-9].nvvp"):
print_progress(cfg,"Read " + nvvp_filename + " for API traces by nvprof -- begin")
with open(logdir + "cuda_api_trace.tmp", "w") as f:
subprocess.call(["nvprof", "--csv", "--print-api-trace", "-i", nvvp_filename], stderr=f)
#Automatically retrieve the timestamp of the first CUDA activity(e.g. kernel, memory op, etc..)
engine = create_engine("sqlite:///"+nvvp_filename)
t_glb_gpu_bases = []
first_corid = 1
try:
t_glb_gpu_bases.append((pd.read_sql_table('CUPTI_ACTIVITY_KIND_RUNTIME',engine)).iloc[0]['start'])
first_corid = (pd.read_sql_table('CUPTI_ACTIVITY_KIND_RUNTIME',engine)).iloc[0]['correlationId']
except BaseException:
print_info(cfg,'NO RUNTIME')
if len(t_glb_gpu_bases) > 0:
t_glb_gpu_base = sorted(t_glb_gpu_bases)[0]*1.0/1e+9
else:
print_warning(cfg,"There is no data in tables of NVVP file.")
print_info(cfg,"Timestamp of the first CUDA API trace = " + str(t_glb_gpu_base))
print_progress(cfg,"Read " + nvvp_filename + " by nvprof -- end")
num_cudaproc = num_cudaproc + 1
with open(logdir + 'cuda_api_trace.tmp') as f:
records = f.readlines()
# print(records[1])
if len(records) > 0 and records[1].split(',')[0] == '"Start"':
indices = records[1].replace(
'"', '').replace(
'\n', '').split(',')
ts_rescale = 1.0
if records[2].split(',')[0] == 'ms':
ts_rescale = 1.0e3
elif records[2].split(',')[0] == 'us':
ts_rescale = 1.0e6
dt_rescale = 1.0
if records[2].split(',')[1] == 'ms':
dt_rescale = 1.0e3
elif records[2].split(',')[1] == 'us':
dt_rescale = 1.0e6
records = records[3:]
print_info(cfg,"Length of cuda_api_traces = %d" % len(records))
#TODO: Apply parallel search to speed up
t_base = float(records[0].split(',')[0])
if len(records[0].split(',')) == 4:
for record in records:
if int(record.split(',')[3]) == first_corid:
t_base = float(record.split(',')[0])
print_info(cfg,'First Correlation_ID ' + str(first_corid) + ' is found in cuda_api_trace.tmp')
print_info(cfg,'First API trace timestamp is ' + str(t_base))
break
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
cuda_api_trace_read,
cfg=cfg,
indices=indices,
ts_rescale=ts_rescale,
dt_rescale=dt_rescale,
payload_unit=payload_unit,
n_cudaproc=num_cudaproc,
t_offset=t_glb_gpu_base -
t_base),
records)
cuda_api_traces = pd.DataFrame(res)
cuda_api_traces.columns = sofa_fieldnames
res_viz = list_downsample(res, cfg.plot_ratio)
cuda_api_traces_viz = pd.DataFrame(res_viz)
cuda_api_traces_viz.columns = sofa_fieldnames
cuda_api_traces.to_csv(
logdir + 'cuda_api_trace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
# ============ Preprocessing CPU Trace ==========================
with open(logdir+'perf_events_used.txt','r') as f:
lines = f.readlines()
if lines:
cfg.perf_events = lines[0]
else:
cfg.perf_events = ''
print_info(cfg,'perf_events_used: %s' % (cfg.perf_events))
# Determine time base for perf traces
perf_timebase_uptime = 0
perf_timebase_unix = 0
last_nvvp_ts = 0
for nvvp_filename in glob.glob(logdir + "cuhello*[0-9].nvvp"):
print_progress(cfg,"Read " + nvvp_filename + " by nvprof -- begin")
engine = create_engine('sqlite:///' + nvvp_filename)
last_nvvp_tss = []
try:
last_nvvp_tss.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_MEMSET',engine)).iloc[-1]['start'])
except BaseException:
print_info(cfg,'NO MEMSET')
try:
last_nvvp_tss.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_MEMCPY',engine)).iloc[-1]['start'])
except BaseException:
print_info(cfg,'NO MEMCPY')
try:
last_nvvp_tss.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL',engine)).iloc[-1]['start'])
except BaseException:
print_info(cfg,'NO CONCURRENT KERNEL')
try:
last_nvvp_tss.append( (pd.read_sql_table('CUPTI_ACTIVITY_KIND_KERNEL',engine)).iloc[-1]['start'])
except BaseException:
print_info(cfg,'NO KERNEL')
if len(last_nvvp_tss) > 0:
last_nvvp_ts = sorted(last_nvvp_tss,reverse=True)[0]*1.0/1e+9
else:
print_warning(cfg,"There is no data in tables of NVVP file.")
if int(os.system('command -v perf 1> /dev/null')) == 0:
with open(logdir + 'cuhello.perf.script', 'w') as logfile:
subprocess.call(['perf',
'script',
'--kallsym',
'%s/kallsyms' % logdir,
'-i',
'%s/cuhello.perf.data' % logdir,
'-F',
'time,pid,tid,event,ip,sym,dso,symoff,period,brstack,brstacksym'],
stdout=logfile)
try:
with open(logdir + 'cuhello.perf.script') as f:
samples = f.readlines()
print_info(cfg,"Length of cpu_traces = %d" % len(samples))
if len(samples) > 0:
for sample in reversed(samples):
fields = sample.split()
function_name = ""
if re.match('\[\d+\]', fields[1]) is not None:
function_name = '[%s]'%fields[4].replace('-','_') + fields[6] + fields[7]
else:
function_name = '[%s]'%fields[3].replace('-','_') + fields[5] + fields[6]
if function_name.find('libcuda.so') != -1 and len(last_nvvp_tss)>0:
perf_timebase_uptime = float(sample.split()[1].split(':')[0])
perf_timebase_unix = last_nvvp_ts
break
except:
        print_warning(cfg,'no cuhello.perf.script; timestamp synchronization between CPU and GPU may not be precise enough.')
print_progress(cfg,"Read " + nvvp_filename + " by nvprof -- end")
# STRACE Preprocessing
#CASE1: strace: Process 8361 attached
#CASE2: 1550311783.488821 mmap(NULL, 262144, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f501f910000 <0.000012>
#CASE3: [pid 8372] 1550311820.640979 +++ exited with 0 +++
total_strace_duration = 0
filter_keys = []
#filter_keys.append('futex')
filter_keys.append('resume')
filter_keys.append('nanosleep')
filter_keys.append('clock_gettime')
filter_keys.append('brk')
filter_keys.append('stat')
filter_keys.append('close')
filter_keys.append('exited')
filter_keys.append('access')
filter_keys.append('lseek')
filter_keys.append('getrusage')
filter_keys.append('exited')
if os.path.isfile('%s/strace.txt' % logdir):
with open('%s/strace.txt' % logdir) as f:
lines = f.readlines()
print_info(cfg,"Length of straces = %d" % len(lines))
if len(lines) > 1:
strace_list = []
strace_list.append(np.empty((len(sofa_fieldnames), 0)).tolist())
for i in range(len(lines)):
if i % cfg.plot_ratio > 0:
continue
pid = cfg.pid
tid = 0
b_skip = False
for key in filter_keys:
if lines[i].find(key) != -1:
b_skip = True
if b_skip:
continue
fields = lines[i].split()
if fields[0].find('pid') != -1 :
tid = int(fields[1].split(']')[0])
t_begin = float(fields[2])
strace_info = ''.join(fields[3:-3])
else:
tid = pid
t_begin = float(fields[1])
strace_info = ''.join(fields[1:-3])
if not cfg.absolute_timestamp:
t_begin = t_begin - cfg.time_base
#strace_info = strace_info.split('(')[0]
try:
duration = float(fields[-1].split('<')[1].split('>')[0])
except:
duration = 0
total_strace_duration = total_strace_duration + duration
if duration < cfg.strace_min_time:
continue
                deviceId = cpuid = -1
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
trace = [
t_begin,
event,
duration,
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
strace_info,
cpuid]
strace_list.append(trace)
print_info(cfg, 'strace.txt reading is done.')
if len(strace_list)>1:
strace_traces = list_to_csv_and_traces(cfg, strace_list, 'strace.csv', 'w')
print_info(cfg,'Total strace duration: %.3lf' % total_strace_duration)
# Pystacks Preprocessing
def parse_pystacks(filepath, ignore_idle=False):
ret = {}
with open(filepath, 'r') as f:
for ts, fs in itertools.zip_longest(*[f] * 2):
fs = fs.replace('\n', '').replace(';', '<br>')
if ignore_idle:
if fs.find('idle') != -1:
continue
ret[int(ts) / 10 ** 6] = fs
duration = {}
prev = None
for k, val in ret.items():
if prev is None:
prev = k
continue
duration[prev] = k - prev
prev = k
        if ret:
            del ret[max(ret.keys())]
return ret, duration
if os.path.isfile('{}/pystacks.txt'.format(logdir)):
fstack, dur = parse_pystacks('{}/pystacks.txt'.format(logdir), ignore_idle=True)
pystacks_list = []
if fstack:
for key, info in fstack.items():
                deviceId = cpuid = -1
event = -1
copyKind = -1
payload = -1
bandwidth = -1
pkt_src = pkt_dst = -1
pid = tid = -1
t_begin = key if cfg.absolute_timestamp else key - cfg.time_base
trace = [
t_begin,
event,
float(dur[key]),
deviceId,
copyKind,
payload,
bandwidth,
pkt_src,
pkt_dst,
pid,
tid,
info,
cpuid
]
pystacks_list.append(trace)
if pystacks_list:
pystacks_traces = list_to_csv_and_traces(cfg, pystacks_list, 'pystacks.csv', 'w')
# Time synchronization among BIOS Time (e.g. used by perf) and NTP Time (e.g. NVPROF, tcpdump, etc.)
if perf_timebase_unix == 0:
with open(logdir + 'perf_timebase.txt') as f:
lines = f.readlines()
if len(lines) <= 3:
            print_warning(cfg,'Recorded program is too short.')
perf_timebase_uptime = 0
perf_timebase_unix = 0
elif lines[0].find('WARNING') != -1:
perf_timebase_uptime = 0
perf_timebase_unix = 0
elif platform.platform().find('Darwin') != -1:
perf_timebase_unix = float(lines[-1].split()[0])
perf_timebase_uptime = perf_timebase_unix
else:
try:
perf_timebase_uptime = float(lines[-2].split()[2].split(':')[0])
perf_timebase_unix = float(lines[-1].split()[0])
except:
print_warning(cfg,'Incorrect format in perf_timebase.txt; the profiling time might be too short.')
try:
with open(logdir + 'perf.script') as f:
samples = f.readlines()
print_info(cfg,"Length of cpu_traces = %d" % len(samples))
if len(samples) > 0:
with mp.Pool(processes=cpu_count) as pool:
res = pool.map(
partial(
cpu_trace_read,
cfg = cfg,
t_offset = perf_timebase_unix - perf_timebase_uptime,
cpu_mhz_xp = cpu_mhz_xp,
cpu_mhz_fp = cpu_mhz_fp),
samples)
cpu_traces = pd.DataFrame(res)
cpu_traces.columns = sofa_fieldnames
cpu_traces.to_csv(
logdir + 'cputrace.csv',
mode='w',
header=True,
index=False,
float_format='%.6f')
res_viz = list_downsample(res, cfg.plot_ratio)
cpu_traces_viz = pd.DataFrame(res_viz)
cpu_traces_viz.columns = sofa_fieldnames
char1 = ']'
char2 = '+'
# demangle c++ symbol, little dirty work here...
cpu_traces_viz['name'] = cpu_traces_viz['name'].apply(
lambda x: cxxfilt.demangle(str( x[x.find(char1)+1 : x.find(char2)].split('@')[0] ))
)
### Apply filters for cpu traces
filtered_groups = []
if len(cpu_traces) > 0:
df_grouped = cpu_traces_viz.groupby('name')
for filter in cfg.cpu_filters:
group = cpu_traces_viz[cpu_traces_viz['name'].str.contains(
filter.keyword)]
filtered_groups.append({'group': group,
'color': filter.color,
'keyword': filter.keyword})
if cfg.enable_swarms:
try:
swarm_stats = []
swarms = []
#swarms, swarm_stats = hsg_v1(cfg, cpu_traces, swarms, swarm_stats, perf_timebase_unix - perf_timebase_uptime, cpu_mhz_xp, cpu_mhz_fp)
cpu_traces, swarms = hsg_v2(cfg, cpu_traces, export_file=cfg.logdir+'/swarms_report.txt')
except TypeError:
print_warning(cfg,'HSG returned a None object to swarms, check if sofalog/perf.data can be accessed.')
pass
except:
print_warning(cfg,'no perf traces.')
print_progress(cfg,
"Export Overhead Dynamics JSON File of CPU, Network and GPU traces -- begin")
# TODO: provide option to use absolute or relative timestamp
# cpu_traces.loc[:,'timestamp'] -= cpu_traces.loc[0,'timestamp']
# net_traces.loc[:,'timestamp'] -= net_traces.loc[0,'timestamp']
# gpu_traces.loc[:,'timestamp'] -= gpu_traces.loc[0,'timestamp']
traces = []
if len(cpu_traces_viz) > 0:
sofatrace = SOFATrace()
sofatrace.name = 'cpu_trace'
sofatrace.title = 'CPU'
sofatrace.color = 'DarkGray'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = cpu_traces_viz
traces.append(sofatrace)
for filtered_group in filtered_groups:
sofatrace = SOFATrace()
sofatrace.name = filtered_group['keyword']
sofatrace.title = '[keyword]' + sofatrace.name
sofatrace.color = filtered_group['color']
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = filtered_group['group'].copy()
traces.append(sofatrace)
if cfg.enable_swarms and len(swarms) > 0 :
traces = swarms_to_sofatrace(cfg, swarms, traces) # append data of hsg function
if os.path.isdir(cfg.logdir+'/container_root'):
subprocess.call(with_sudo + 'umount %s/container_root ' % (cfg.logdir), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.call('rm -r %s/container_root ' % (cfg.logdir), shell=True )
if os.path.isfile(cfg.logdir+'/cidfile.txt'):
subprocess.call('docker stop %s' % (cid), shell=True )
sofatrace = SOFATrace()
sofatrace.name = 'blktrace_starting_block'
sofatrace.title = 'BLKTRACE_STARTING_BLOCK'
sofatrace.color = 'Green'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = blk_d_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'blktrace_time'
sofatrace.title = 'BLKTRACE_TIME'
sofatrace.color = 'DodgerBlue'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = blk_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_cs'
sofatrace.title = 'VMSTAT_CS'
sofatrace.color = 'Pink'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_cs_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_bi'
sofatrace.title = 'VMSTAT_BI'
sofatrace.color = 'DarkOrange'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_bi_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_bo'
sofatrace.title = 'VMSTAT_BO'
sofatrace.color = 'DarkOrchid'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_bo_traces
traces.append(sofatrace)
if cfg.enable_mpstat:
sofatrace = SOFATrace()
sofatrace.name = 'mpstat_usr'
sofatrace.title = 'MPSTAT_USR'
sofatrace.color = 'Cyan'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = mpstat_traces
traces.append(sofatrace)
if cfg.enable_diskstat:
sofatrace = SOFATrace()
sofatrace.name = 'diskstat'
sofatrace.title = 'DISK_USAGE (MB/s)'
sofatrace.color = 'GreenYellow'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'bandwidth'
sofatrace.data = diskstat_traces
traces.append(sofatrace)
if cfg.enable_vmstat:
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_usr'
sofatrace.title = 'CPU_USAGE_USR'
sofatrace.color = 'Magenta'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_usr_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_sys'
sofatrace.title = 'CPU_USAGE_SYS'
sofatrace.color = 'LightBlue'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_sys_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_in'
sofatrace.title = 'VMSTAT_IN'
sofatrace.color = 'DarkMagenta'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_in_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'vmstat_cs'
sofatrace.title = 'VMSTAT_CS'
sofatrace.color = 'DarkOliveGreen'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = vm_cs_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'strace'
sofatrace.title = 'STRACE.'
sofatrace.color = 'DarkSlateGray'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = strace_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'pystacks'
sofatrace.title = 'Python-stacks.'
sofatrace.color = 'Tomato'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = pystacks_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'nvsmi_mem'
sofatrace.title = 'GPU_MEM_Util.'
sofatrace.color = 'lightblue'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = nvsmi_mem_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'nvsmi_sm'
sofatrace.title = 'GPU_SM_Util.'
sofatrace.color = 'red'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = nvsmi_sm_traces
traces.append(sofatrace)
if cfg.enable_encode_decode:
sofatrace = SOFATrace()
sofatrace.name = 'nvsmi_enc'
sofatrace.title = 'GPU_ENC_Util.'
sofatrace.color = 'rgba(255, 215, 0, 0.8)' #Gold
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = nvsmi_enc_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'nvsmi_dec'
sofatrace.title = 'GPU_DEC_Util.'
sofatrace.color = 'rgba(218, 165, 32, 0.8)' #GoldenRod
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = nvsmi_dec_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'net_trace'
sofatrace.title = 'NET'
sofatrace.color = 'blue'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = net_traces
traces.append(sofatrace)
if cfg.net_filters:
for filtered_net_group in filtered_net_groups:
sofatrace = SOFATrace()
sofatrace.name = filtered_net_group['keyword']
sofatrace.title = '[keyword]' + sofatrace.name
sofatrace.color = filtered_net_group['color']
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = filtered_net_group['group'].copy()
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'tx_bandwidth'
sofatrace.title = 'tx Bandwidth'
sofatrace.color = 'rgba(135,206,250,0.8)' # LightSkyBlue
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'bandwidth'
sofatrace.data = tx_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'rx_bandwidth'
sofatrace.title = 'rx Bandwidth'
sofatrace.color = 'rgba(25,25,112,0.8)' # MidnightBlue
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'bandwidth'
sofatrace.data = rx_traces
traces.append(sofatrace)
sofatrace = SOFATrace()
sofatrace.name = 'gpu_kernel_trace'
sofatrace.title = 'GPU kernel'
sofatrace.color = 'rgba(0,180,0,0.8)'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = gpu_traces_viz
traces.append(sofatrace)
for filtered_gpu_group in filtered_gpu_groups:
sofatrace = SOFATrace()
sofatrace.name = filtered_gpu_group['keyword']
sofatrace.title = '[keyword]' + sofatrace.name
sofatrace.color = filtered_gpu_group['color']
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = filtered_gpu_group['group'].copy()
traces.append(sofatrace)
if cfg.cuda_api_tracing:
sofatrace = SOFATrace()
sofatrace.name = 'cuda_api_trace'
sofatrace.title = 'CUDA API'
sofatrace.color = 'DarkSlateGray'
sofatrace.x_field = 'timestamp'
sofatrace.y_field = 'duration'
sofatrace.data = cuda_api_traces_viz
traces.append(sofatrace)
traces_to_json(traces, logdir + 'report.js', cfg)
print_progress(cfg,
"Export Overhead Dynamics JSON File of CPU, Network and GPU traces -- end")
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/numpy/linalg/linalg.py | 2 | 77340 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
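# A small, hedged sketch of the promotion rule implemented above: inexact
# inputs are always upgraded to double/cdouble for the LAPACK call, while the
# reported result type follows the inputs. The helper name _sketch_common_type
# is illustrative only and not part of the module's API.
def _sketch_common_type():
    a32 = array([1.0], dtype=single)
    c64 = array([1j], dtype=csingle)
    t, result_t = _commonType(a32, c64)
    # t is cdouble (computation type), result_t is csingle (reporting type)
    return t, result_t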
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
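# A brief sketch of the reshaping performed above: tensorsolve flattens the
# tensor equation into an ordinary 2-D solve. Shapes below are illustrative
# and the helper is not part of the module's API.
def _sketch_tensorsolve_reduction():
    import numpy as np
    a = np.random.randn(6, 4, 2, 3, 4)   # b.shape + Q, with prod(Q) == prod(b.shape)
    b = np.random.randn(6, 4)
    x1 = np.linalg.tensorsolve(a, b)                               # shape Q == (2, 3, 4)
    x2 = np.linalg.solve(a.reshape(24, 24), b.ravel()).reshape(2, 3, 4)
    return np.allclose(x1, x2)   # expected True; the random system is almost surely nonsingular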
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
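# A minimal sketch of the broadcasting behaviour described in the docstring
# above: a stack of coefficient matrices is solved in one call. The helper
# name is illustrative only.
def _sketch_solve_stack():
    import numpy as np
    a = np.array([np.eye(3), 2.0 * np.eye(3)])   # shape (2, 3, 3)
    b = np.ones((2, 3))                          # one right-hand side per matrix
    x = np.linalg.solve(a, b)                    # shape (2, 3); x[1] is all 0.5
    return x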
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
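# A short sketch of the two-step solve outlined in the Notes above. For
# brevity it reuses the general solver on the triangular factors; a dedicated
# triangular solver (e.g. scipy.linalg.solve_triangular) would normally be
# preferred. The helper name is illustrative only.
def _sketch_cholesky_solve():
    import numpy as np
    A = np.array([[4.0, 2.0], [2.0, 3.0]])   # symmetric positive-definite
    b = np.array([1.0, 2.0])
    L = np.linalg.cholesky(A)
    y = np.linalg.solve(L, b)                 # solve L y = b
    x = np.linalg.solve(L.T.conj(), y)        # solve L.H x = y
    return np.allclose(np.dot(A, x), b)       # expected True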
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
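# A quick sketch of the mode-dependent return shapes listed in the docstring
# above, for an M=9, N=6 input. The helper name is illustrative only.
def _sketch_qr_modes():
    import numpy as np
    a = np.random.randn(9, 6)
    q, r = np.linalg.qr(a)                      # 'reduced': q (9, 6), r (6, 6)
    qc, rc = np.linalg.qr(a, mode='complete')   # qc (9, 9), rc (9, 6)
    r_only = np.linalg.qr(a, mode='r')          # (6, 6)
    h, tau = np.linalg.qr(a, mode='raw')        # h (6, 9), tau (6,)
    return q.shape, rc.shape, r_only.shape, h.shape, tau.shape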
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
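# A compact check of the defining relation stated in the Notes above,
# dot(a, v[:, i]) == w[i] * v[:, i], evaluated for all columns at once.
# The helper name is illustrative only.
def _sketch_eig_relation():
    import numpy as np
    a = np.random.randn(4, 4)
    w, v = np.linalg.eig(a)
    return np.allclose(np.dot(a, v), v * w)   # w broadcasts over the columns of v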
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
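# A small sketch of the broadcasting behaviour of svd over a stack of
# matrices, using the reduced shapes documented above. The helper name is
# illustrative only.
def _sketch_svd_stack():
    import numpy as np
    a = np.random.randn(5, 9, 6)
    u, s, vt = np.linalg.svd(a, full_matrices=False)
    # u: (5, 9, 6), s: (5, 6), vt: (5, 6, 6)
    recon = np.einsum('...ij,...j,...jk->...ik', u, s, vt)
    return np.allclose(a, recon)              # expected True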
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return the matrix rank of an array using the SVD method.
The rank of the array is the number of singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
return (S > tol).sum(axis=-1)
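# A short sketch of the tolerance discussion above: a tiny but nonzero
# singular value survives the default tol yet is dropped by a looser one.
# The helper name is illustrative only.
def _sketch_matrix_rank_tol():
    import numpy as np
    M = np.diag([1.0, 1.0, 1e-10])
    default_rank = np.linalg.matrix_rank(M)           # 3: 1e-10 exceeds the default tol
    loose_rank = np.linalg.matrix_rank(M, tol=1e-8)   # 2: the 1e-10 value is treated as zero
    return default_rank, loose_rank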
# Generalized inverse
def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
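# A brief sketch of the rcond cutoff described above: singular values below
# rcond * largest_singular_value are zeroed instead of inverted. The helper
# name is illustrative only.
def _sketch_pinv_rcond():
    import numpy as np
    a = np.diag([1.0, 1e-6])
    tight = np.linalg.pinv(a)                 # default rcond: both values inverted
    loose = np.linalg.pinv(a, rcond=1e-3)     # 1e-6 falls below the cutoff
    return tight[1, 1], loose[1, 1]           # roughly 1e6 versus 0.0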
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
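# A small sketch of the residuals rule documented above: the sums of squared
# residuals are only returned for an overdetermined, full-rank system. The
# helper name is illustrative only.
def _sketch_lstsq_residuals():
    import numpy as np
    A = np.vstack([np.arange(4.0), np.ones(4)]).T   # (4, 2), full column rank
    y = np.array([-1.0, 0.2, 0.9, 2.1])
    x, res, rank, s = np.linalg.lstsq(A, y)
    # res has shape (1,) here; for a square or rank-deficient A it is empty
    return x, res, rank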
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
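# --- Editor's note: a minimal usage sketch of `multi_dot`, not part of the
# original numpy source. It assumes only numpy and the function defined above;
# the shapes are chosen to match the cost discussion in the docstring's Notes.
#
# >>> import numpy as np
# >>> from numpy.linalg import multi_dot
# >>> A = np.random.random((10, 100))
# >>> B = np.random.random((100, 5))
# >>> C = np.random.random((5, 50))
# >>> multi_dot([A, B, C]).shape
# (10, 50)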
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`.
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
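# --- Editor's note: a small hedged illustration of the chain-order DP above,
# not part of the original numpy source. For A_{10x100}, B_{100x5}, C_{5x50}
# the dimension list is p = [10, 100, 5, 50]; the optimal split is k = 1,
# i.e. (AB)C, with a minimal cost of 7500 scalar multiplications, matching the
# worked example in the `multi_dot` docstring.
#
# >>> import numpy as np
# >>> A, B, C = np.empty((10, 100)), np.empty((100, 5)), np.empty((5, 50))
# >>> s, m = _multi_dot_matrix_chain_order([A, B, C], return_costs=True)
# >>> int(m[0, 2]), int(s[0, 2])
# (7500, 1)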
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| mit |
micahhausler/pandashells | pandashells/lib/outlier_lib.py | 7 | 2092 | #! /usr/bin/env python
# standard library imports
from collections import Counter
from pandashells.lib import module_checker_lib
# import required dependencies
module_checker_lib.check_for_modules(['pandas', 'numpy'])
import pandas as pd
import numpy as np
# disable the chained-assignment warning because it raises a false alarm here
pd.options.mode.chained_assignment = None
# recursively sigma-edit a series (outliers are set to NaN)
def sigma_edit_series(sigma_thresh, in_series, iter_counter=None, max_iter=20):
iter_counter = Counter() if iter_counter is None else iter_counter
if in_series.count() == 0:
msg = "Error: No non-NaN values from which to remove outliers"
raise ValueError(msg)
iter_counter.update('n')
if iter_counter['n'] > max_iter:
msg = "Error: Max Number of iterations exceeded in sigma-editing"
raise ValueError(msg)
resid = in_series - in_series.mean()
std = resid.std()
sigma_t = sigma_thresh * std
outside = resid.abs() >= sigma_t
if any(outside):
in_series.loc[outside] = np.NaN
in_series = sigma_edit_series(
sigma_thresh, in_series, iter_counter, max_iter)
return in_series
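# --- Editor's note: a minimal usage sketch, not part of the original module.
# The gross outlier is replaced by NaN on the first pass; the recursion then
# stops because the remaining residuals fall within 2 sigma.
#
# >>> s = pd.Series([10., 10.5, 9.5, 10., 11., 9., 1000.])
# >>> sigma_edit_series(2., s.copy()).isnull().sum()
# 1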
def ensure_col_exists(df, col, df_name='dataframe'):
if not df.empty and col not in list(df.columns):
msg = 'in sigma_edit: {} does not have column {}'.format(
df_name, repr(col))
raise ValueError(msg)
def sigma_edit_dataframe(sigma_thresh, columns, df, max_iter=20):
"""
:type sigma_thresh: float
:param sigma_thresh: The sigma threshold
:type columns: list
:param columns: a list of columns to sigma edit
:type df: pandas.DataFrame
:param df: The dataframe with columns of data to sigma-edit
:type max_iter: int
:param max_iter: Cap the number of iteration at this number
:rtype: Pandas DataFrame
:returns: A dataframe with outliers set to NaN
"""
for col in columns:
ensure_col_exists(df, col, 'df')
ser = df[col]
df.loc[:, col] = sigma_edit_series(sigma_thresh, ser, max_iter=max_iter)
return df
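# --- Editor's note: a minimal usage sketch, not part of the original module.
# Only the named columns are edited; here the single outlier in 'x' becomes NaN
# while 'y' is left untouched.
#
# >>> df = pd.DataFrame({'x': [1., 1.1, .9, 1., 1.2, .8, 100.],
# ... 'y': range(7)})
# >>> out = sigma_edit_dataframe(2., ['x'], df)
# >>> int(out['x'].isnull().sum()), int(out['y'].isnull().sum())
# (1, 0)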
| bsd-2-clause |
astroML/astroML | astroML/linear_model/linear_regression.py | 2 | 6902 | import numpy as np
from sklearn.base import BaseEstimator
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, Lasso, Ridge
# ------------------------------------------------------------
# Basis functions
def gaussian_basis(X, mu, sigma):
"""Gaussian Basis function
Parameters
----------
X : array_like
input data: shape = (n_samples, n_features)
mu : array_like
means of bases, shape = (n_bases, n_features)
sigma : float or array_like
must broadcast to shape of mu
Returns
-------
Xg : ndarray
shape = (n_samples, n_bases)
"""
X = np.asarray(X)
mu = np.atleast_2d(mu)
sigma = np.atleast_2d(sigma)
n_samples, n_features = X.shape
if mu.shape[1] != n_features:
raise ValueError('shape of mu must match shape of X')
r = (((X[:, None, :] - mu) / sigma) ** 2).sum(2)
Xg = np.exp(-0.5 * r)
Xg *= 1. / np.sqrt(2 * np.pi) / sigma.prod(1)
return Xg
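# --- Editor's note: a minimal shape check, not part of the original source.
# Ten 1-D samples evaluated against three Gaussian centers give a
# (n_samples, n_bases) = (10, 3) design matrix, as documented above.
#
# >>> X = np.linspace(0, 1, 10)[:, None] # (n_samples, n_features) = (10, 1)
# >>> mu = np.array([[0.0], [0.5], [1.0]]) # (n_bases, n_features) = (3, 1)
# >>> gaussian_basis(X, mu, 0.1).shape
# (10, 3)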
class LinearRegression(BaseEstimator):
"""Simple Linear Regression with errors in y
This is a stripped-down version of sklearn.linear_model.LinearRegression
which correctly accounts for errors in the y variable
Parameters
----------
fit_intercept : bool (optional)
if True (default) then fit the intercept of the data
regularization : string (optional)
['l1'|'l2'|'none'] Use L1 (Lasso) or L2 (Ridge) regression
kwds: dict
additional keyword arguments passed to sklearn estimators:
LinearRegression, Lasso (L1), or Ridge (L2)
Notes
-----
This implementation may be compared to that in
sklearn.linear_model.LinearRegression.
The difference is that here measurement errors in y are taken into account in the fit.
"""
_regressors = {'none': LinearRegression,
'l1': Lasso,
'l2': Ridge}
def __init__(self, fit_intercept=True, regularization='none', kwds=None):
if regularization.lower() not in ['l1', 'l2', 'none']:
raise ValueError("regularization='{}' not recognized"
"".format(regularization))
self.fit_intercept = fit_intercept
self.regularization = regularization
self.kwds = kwds
def _transform_X(self, X):
X = np.asarray(X)
if self.fit_intercept:
X = np.hstack([np.ones([X.shape[0], 1]), X])
return X
@staticmethod
def _scale_by_error(X, y, y_error=1):
"""Scale regression by error on y"""
X = np.atleast_2d(X)
y = np.asarray(y)
y_error = np.asarray(y_error)
assert X.ndim == 2
assert y.ndim == 1
assert X.shape[0] == y.shape[0]
if y_error.ndim == 0:
return X / y_error, y / y_error
elif y_error.ndim == 1:
assert y_error.shape == y.shape
X_out, y_out = X / y_error[:, None], y / y_error
elif y_error.ndim == 2:
assert y_error.shape == (y.size, y.size)
evals, evecs = np.linalg.eigh(y_error)
X_out = np.dot(evecs * (evals ** -0.5),
np.dot(evecs.T, X))
y_out = np.dot(evecs * (evals ** -0.5),
np.dot(evecs.T, y))
else:
raise ValueError("shape of y_error does not match that of y")
return X_out, y_out
def _choose_regressor(self):
model = self._regressors.get(self.regularization.lower(), None)
if model is None:
raise ValueError("regularization='{}' unrecognized"
"".format(self.regularization))
return model
def fit(self, X, y, y_error=1):
kwds = {}
if self.kwds is not None:
kwds.update(self.kwds)
kwds['fit_intercept'] = False
model = self._choose_regressor()
self.clf_ = model(**kwds)
X = self._transform_X(X)
X, y = self._scale_by_error(X, y, y_error)
self.clf_.fit(X, y)
return self
def predict(self, X):
X = self._transform_X(X)
return self.clf_.predict(X)
@property
def coef_(self):
return self.clf_.coef_
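# --- Editor's note: a minimal usage sketch of the LinearRegression class
# defined above (not scikit-learn's), not part of the original source. With
# fit_intercept=True a column of ones is prepended, so coef_ holds
# [intercept, slope]; y_error supplies the per-point uncertainties used to
# whiten the design matrix.
#
# >>> rng = np.random.RandomState(0)
# >>> x = np.linspace(0, 1, 50)[:, None]
# >>> y_err = 0.1 * np.ones(50)
# >>> y = 1 + 2 * x.ravel() + y_err * rng.randn(50)
# >>> model = LinearRegression().fit(x, y, y_err)
# >>> model.coef_.shape
# (2,)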
class PolynomialRegression(LinearRegression):
"""Polynomial Regression with errors in y
Parameters
----------
degree : int
degree of the polynomial.
interaction_only : bool (optional)
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
fit_intercept : bool (optional)
if True (default) then fit the intercept of the data
regularization : string (optional)
['l1'|'l2'|'none'] Use L1 (Lasso) or L2 (Ridge) regression
kwds: dict
additional keyword arguments passed to sklearn estimators:
LinearRegression, Lasso (L1), or Ridge (L2)
"""
def __init__(self, degree=1, interaction_only=False,
fit_intercept=True,
regularization='none', kwds=None):
self.degree = degree
self.interaction_only = interaction_only
LinearRegression.__init__(self, fit_intercept, regularization, kwds)
def _transform_X(self, X):
trans = PolynomialFeatures(degree=self.degree,
interaction_only=self.interaction_only,
include_bias=self.fit_intercept)
return trans.fit_transform(X)
class BasisFunctionRegression(LinearRegression):
"""Basis Function with errors in y
Parameters
----------
basis_func : str or function
specify the basis function to use. This should take an input matrix
of size (n_samples, n_features), along with optional parameters,
and return a matrix of size (n_samples, n_bases).
fit_intercept : bool (optional)
if True (default) then fit the intercept of the data
regularization : string (optional)
['l1'|'l2'|'none'] Use L1 (Lasso) or L2 (Ridge) regression
kwds: dict
additional keyword arguments passed to sklearn estimators:
LinearRegression, Lasso (L1), or Ridge (L2)
"""
_basis_funcs = {'gaussian': gaussian_basis}
def __init__(self, basis_func='gaussian', fit_intercept=True,
regularization='none', kwds=None, **kwargs):
self.basis_func = basis_func
self.kwargs = kwargs
LinearRegression.__init__(self, fit_intercept, regularization, kwds)
def _transform_X(self, X):
if callable(self.basis_func):
basis_func = self.basis_func
else:
basis_func = self._basis_funcs.get(self.basis_func, None)
X = basis_func(X, **self.kwargs)
if self.fit_intercept:
X = np.hstack([np.ones((X.shape[0], 1)), X])
return X
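# --- Editor's note: a minimal usage sketch, not part of the original source.
# Extra keyword arguments (here five centers `mu` and a width `sigma=0.2`,
# chosen arbitrarily for illustration) are forwarded to `gaussian_basis`
# via **kwargs.
#
# >>> rng = np.random.RandomState(0)
# >>> x = np.linspace(0, 1, 40)[:, None]
# >>> y = np.sin(2 * np.pi * x.ravel()) + 0.1 * rng.randn(40)
# >>> mu = np.linspace(0, 1, 5)[:, None]
# >>> model = BasisFunctionRegression('gaussian', mu=mu, sigma=0.2).fit(x, y)
# >>> model.predict(x).shape
# (40,)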
| bsd-2-clause |
rishikksh20/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 55 | 9939 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
lfairchild/PmagPy | dialogs/pmag_menu_dialogs.py | 1 | 93887 | #!/usr/bin/env pythonw
#--------------------------------------------------------------
# converting magnetometer files to MagIC format
#--------------------------------------------------------------
import wx
import os
import sys
import shutil
import subprocess
import wx.grid
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib import pyplot as plt
import pmagpy.pmag as pmag
import pmagpy.ipmag as ipmag
import dialogs.pmag_widgets as pw
from programs.conversion_scripts import agm_magic
from pmagpy import convert_2_magic as convert
class ImportAzDipFile(wx.Frame):
title = "Import AzDip format file"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='azdip_window')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Import an AzDip format file into your working directory"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.sampling_particulars(pnl)
#---sizer 2 ---
self.bSizer2 = pw.select_ncn(pnl)
#---sizer 3 ---
self.bSizer3 = pw.labeled_text_field(pnl, "Location:")
#---sizer 4 ----
TEXT = "Overwrite er_samples.txt file?"
label1 = "yes, overwrite file in working directory"
label2 = "no, update existing er_samples file"
er_samples_file_present = True
try:
er_samp_file = open(os.path.join(self.WD, "er_samples.txt"), "r")
er_samp_file.close()
except Exception as ex:
er_samples_file_present = False
if er_samples_file_present:
self.bSizer4 = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
try:
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
except AttributeError:
pass
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
options = {}
os.chdir(self.WD)
output_dir = self.WD
full_infile = self.bSizer0.return_value()
input_dir, infile = os.path.split(full_infile)
data_model_num = self.Parent.data_model_num
if data_model_num == 2:
Fsa = os.path.splitext(infile)[0] + "_er_samples.txt"
else:
Fsa = os.path.splitext(infile)[0] + "_samples.txt"
mcd = self.bSizer1.return_value()
ncn = self.bSizer2.return_value()
loc = self.bSizer3.return_value()
try:
app = self.bSizer4.return_value()
if app:
app = False #"" # overwrite is True
else:
app = True #"-app" # overwrite is False, append instead
except AttributeError:
app = ""
#COMMAND = "azdip_magic.py -f {} -Fsa {} -ncn {} {} {} {}".format(full_infile, Fsa, ncn, loc, mcd, app)
if len(str(ncn)) > 1:
ncn, Z = ncn.split('-')
else:
Z = 1
program_completed, error_message = ipmag.azdip_magic(infile, Fsa, ncn, Z, mcd, loc,
app, output_dir, input_dir, data_model_num)
if program_completed:
args = [str(arg) for arg in [infile, Fsa, ncn, Z, mcd, loc, app] if arg]
pw.close_window(self, 'ipmag.azdip_magic({})'.format(", ".join(args)), Fsa)
pw.simple_warning('You have created new MagIC files.\nMake sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!')
else:
pw.simple_warning(error_message)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=ipmag.azdip_magic.__doc__)
#class ImportODPCoreSummary(wx.Frame):
class MoveFileIntoWD(wx.Frame):
title = "Import any file into your working directory"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='any file')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Any file type"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to copy to working directory"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
WD = self.WD
full_infile = self.bSizer0.return_value()
if not full_infile:
pw.simple_warning('You must provide a file')
return False
infile = os.path.join(WD, os.path.split(full_infile)[1])
shutil.copyfile(full_infile, os.path.join(WD, infile))
pw.close_window(self, 'Copy infile to {}'.format(WD), infile)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
dlg = wx.MessageDialog(self, "Unaltered file will be copied to working directory", "Help", style=wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
class ImportIODPSampleSummary(wx.Frame):
title = "Import IODP Sample Summary csv file"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='IODP_samples')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "IODP Sample Summary csv file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
WD = self.WD
full_infile = self.bSizer0.return_value()
ID, infile = os.path.split(full_infile)
Fsa = infile[:infile.find('.')] + "_er_samples.txt"
program_ran, error_message = convert.iodp_samples(infile, Fsa, WD, ID)
if not program_ran:
pw.simple_warning(error_message)
else:
COMMAND = "iodp_samples_magic.py -WD {} -f {} -Fsa {} -ID {}".format(WD, infile, Fsa, ID)
pw.close_window(self, COMMAND, Fsa)
pw.simple_warning('You have created new MagIC files.\nMake sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!')
#pw.run_command_and_close_window(self, COMMAND, Fsa)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=convert.iodp_samples.__doc__)
"""
class ImportModelLatitude(wx.Frame):
title = "Import Model Latitude data file"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Model latitude data"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
infile = os.path.split(self.bSizer0.return_value())[1]
outfile = os.path.join(self.WD, infile)
COMMAND = "cp {} {}".format(infile, self.WD)
pw.run_command_and_close_window(self, COMMAND, outfile)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
dlg = wx.MessageDialog(self, "Unaltered file will be copied to working directory", "Help", style=wx.OK|wx.ICON_EXCLAMATION)
dlg.ShowModal()
dlg.Destroy()
"""
class ImportKly4s(wx.Frame):
title = "kly4s format"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='kly4s')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "kly4s format"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, btn_text="Add kly4s format file", method = self.on_add_file_button)
#---sizer 1 ---
# changed to er_samples only, not azdip
self.bSizer1 = pw.choose_file(pnl, btn_text='add samples file (optional)', method = self.on_add_AZDIP_file_button)
#---sizer 2 ----
self.bSizer2 = pw.labeled_text_field(pnl)
#---sizer 3 ---
self.bSizer3 = pw.specimen_n(pnl)
#---sizer 4 ---
self.bSizer4 = pw.select_ncn(pnl)
#---sizer 5 ---
#self.bSizer5 = pw.select_specimen_ocn(pnl)
#---sizer 6 ---
self.bSizer6 = pw.labeled_text_field(pnl, "Location name:")
#---sizer 7 ---
self.bSizer7 = pw.labeled_text_field(pnl, "Instrument name (optional):")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.RIGHT, border=5)
hbox1.Add(self.bSizer7, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.panel.SetSizer(self.hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
self.hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_add_AZDIP_file_button(self,event):
text = "choose samples file (optional)"
pw.on_add_file_button(self.bSizer1, text)
def on_okButton(self, event):
os.chdir(self.WD)
WD = self.WD
full_infile = self.bSizer0.return_value()
ID, infile = os.path.split(full_infile)
outfile = infile + ".magic"
if self.Parent.data_model_num == 2:
spec_outfile = infile[:infile.find('.')] + "_er_specimens.txt"
ani_outfile = infile[:infile.find('.')] + "_rmag_anisotropy.txt"
site_outfile = ''
else:
spec_outfile = infile[:infile.find('.')] + "_specimens.txt"
ani_outfile = ''
site_outfile = infile[:infile.find('.')] + "_sites.txt"
full_samp_file = self.bSizer1.return_value()
samp_file = os.path.split(full_samp_file)[1]
if not samp_file:
samp_outfile = infile[:infile.find('.')] + "_samples.txt"
else:
samp_outfile = samp_file
user = self.bSizer2.return_value()
if user:
user = "-usr " + user
specnum = self.bSizer3.return_value()
n = "-spc " + str(specnum)
ncn = self.bSizer4.return_value()
loc = self.bSizer6.return_value()
if loc:
location = loc
loc = "-loc " + loc
else:
location = ''
ins = self.bSizer7.return_value()
if ins:
instrument = ins
ins = "-ins " + ins
else:
instrument='SIO-KLY4S'
COMMAND = "kly4s_magic.py -WD {} -f {} -F {} -fsa {} -ncn {} {} {} {} {} -ID {} -fsp {} -DM {}".format(self.WD, infile, outfile, samp_file, ncn, user, n, loc, ins, ID, spec_outfile, self.Parent.data_model_num)
program_ran, error_message = convert.kly4s(infile, specnum=specnum,
locname=location, inst=instrument,
samp_con=ncn, user=user, measfile=outfile,
aniso_outfile=ani_outfile,
samp_infile=samp_file, spec_infile='',
spec_outfile=spec_outfile,
dir_path=self.WD, input_dir_path=ID,
data_model_num=self.Parent.data_model_num,
samp_outfile=samp_outfile,
site_outfile=site_outfile)
if program_ran:
pw.close_window(self, COMMAND, outfile)
outfiles = [f for f in [outfile, spec_outfile, ani_outfile] if f]
pw.simple_warning('You have created the following files: {}\nMake sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!'.format(", ".join(outfiles)))
else:
pw.simple_warning(error_message)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=convert.kly4s.__doc__)
class ImportK15(wx.Frame):
title = "Import K15 format file"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Import K15 format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.specimen_n(pnl)
#---sizer 2 ---
self.bSizer2 = pw.select_ncn(pnl)
#---sizer 3 ---
self.bSizer3 = pw.labeled_text_field(pnl, label="Location name:")
#---sizer 4 ---
#self.bSizer4 = pw.labeled_text_field(pnl, label="Instrument name (optional):")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
#hbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
data_model_num = self.Parent.data_model_num
os.chdir(self.WD)
full_infile = self.bSizer0.return_value()
ID, infile = os.path.split(full_infile)
outfile = infile + ".magic"
if data_model_num == 3:
samp_outfile = infile[:infile.find('.')] + "_samples.txt"
else:
samp_outfile = infile[:infile.find('.')] + "_er_samples.txt"
WD = self.WD
specnum = self.bSizer1.return_value()
ncn = self.bSizer2.return_value()
loc = self.bSizer3.return_value()
if loc:
location = loc
loc = "-loc " + loc
else:
location = "unknown"
if data_model_num == 3:
aniso_outfile = infile + '_specimens.txt'
else:
aniso_outfile = infile + '_rmag_anisotropy.txt'
# result file is only used in data model 3, otherwise ignored
aniso_results_file = infile + '_rmag_results.txt'
DM = ""
if data_model_num == 2:
DM = "-DM 2"
COMMAND = "k15_magic.py -WD {} -f {} -F {} -ncn {} -spc {} {} -ID {} -Fsa {} -Fa {} -Fr {} {}".format(WD, infile, outfile, ncn, specnum, loc, ID, samp_outfile, aniso_outfile, aniso_results_file, DM)
program_ran, error_message = convert.k15(infile, WD, ID, outfile, aniso_outfile, samp_outfile,
aniso_results_file, specnum, ncn, location, data_model_num)
print(COMMAND)
if program_ran:
pw.close_window(self, COMMAND, outfile)
outfiles = [f for f in [outfile, samp_outfile, aniso_outfile] if f]
pw.simple_warning('You have created the following files: {}\nMake sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!'.format(", ".join(outfiles)))
else:
pw.simple_warning(error_message)
#print COMMAND
#pw.run_command_and_close_window(self, COMMAND, outfile)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=convert.k15_magic.__doc__)
class ImportSufarAscii(wx.Frame):
title = "Import Sufar Ascii format file"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='Sufar')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Sufar Ascii format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ----
self.bSizer2 = pw.specimen_n(pnl)
#---sizer 3 ---
self.bSizer3 = pw.select_ncn(pnl)
#---sizer 4 ---
self.bSizer4 = pw.labeled_text_field(pnl, label="Location name:")
#---sizer 5 ---
self.bSizer5 = pw.labeled_text_field(pnl, label="Instrument name (optional):")
#---sizer 6 ---
TEXT = "Use default mode?"
label1 = "spinning (default)"
label2 = "static 15 position mode"
self.bSizer6 = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#try:
# vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#except AttributeError:
# pass
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
WD = self.WD
full_infile = self.bSizer0.return_value()
ID, infile = os.path.split(full_infile)
meas_outfile = infile[:infile.find('.')] + ".magic"
if self.Parent.data_model_num == 2:
aniso_outfile = infile[:infile.find('.')] + "_rmag_anisotropy.txt"
spec_outfile = infile[:infile.find('.')] + "_er_specimens.txt"
samp_outfile = infile[:infile.find('.')] + "_er_samples.txt"
site_outfile = infile[:infile.find('.')] + "_er_sites.txt"
else:
aniso_outfile = ''
spec_outfile = infile[:infile.find('.')] + "_specimens.txt"
samp_outfile = infile[:infile.find('.')] + "_samples.txt"
site_outfile = infile[:infile.find('.')] + "_sites.txt"
usr = self.bSizer1.return_value()
if usr:
user = usr
usr = "-usr " + usr
else:
user = ""
specnum = self.bSizer2.return_value()
ncn = self.bSizer3.return_value()
loc = self.bSizer4.return_value()
if loc:
location = loc
loc = "-loc " + loc
else:
location = "unknown"
instrument = self.bSizer5.return_value()
if instrument:
ins = "-ins " + instrument
else:
ins = ''
k15 = self.bSizer6.return_value()
if k15:
k15 = ""
static_15_position_mode = False
else:
k15 = "-k15"
static_15_position_mode = True
spec_infile = None
data_model_num = self.Parent.data_model_num
COMMAND = "SUFAR4-asc_magic.py -WD {} -f {} -F {} {} -spc {} -ncn {} {} {} {} -ID {} -DM {}".format(WD, infile, meas_outfile, usr, specnum, ncn, loc, ins, k15, ID, data_model_num)
program_ran, error_message = convert.sufar4(infile, meas_outfile, aniso_outfile,
spec_infile, spec_outfile, samp_outfile,
site_outfile, specnum, ncn, user,
location, instrument,static_15_position_mode,
WD, ID, data_model_num)
if program_ran:
pw.close_window(self, COMMAND, meas_outfile)
outfiles = [meas_outfile, spec_outfile, samp_outfile, site_outfile]
pw.simple_warning('You have created the following files: {}\nMake sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!'.format(", ".join(outfiles)))
else:
pw.simple_warning(error_message)
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=convert.sufar4.__doc__)
class ImportAgmFile(wx.Frame):
title = "Import single .agm file"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='agm_file')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Micromag agm format file"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ---
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ----
self.bSizer2 = pw.specimen_n(pnl)
#---sizer 3 ---
self.bSizer3 = pw.select_ncn(pnl)
#---sizer 4 ---
self.bSizer4 = pw.labeled_text_field(pnl, label="Location name:")
#---sizer 5 ---
self.bSizer5 = pw.labeled_text_field(pnl, label="Instrument name (optional):")
#---sizer 6---
self.bSizer6 = pw.labeled_yes_or_no(pnl, "Units", "CGS units (default)", "SI units")
#---sizer 7 ---
self.bSizer7 = pw.check_box(pnl, "backfield curve")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox1.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
hbox2.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox2.Add(self.bSizer7, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
options_dict={}
WD = self.WD
full_infile = self.bSizer0.return_value()
ID, infile = os.path.split(full_infile)
outfile = infile + ".magic"
#spec_outfile = infile[:infile.find('.')] + "_er_specimens.txt"
spec_outfile = infile[:infile.find('.')] + "_specimens.txt"
usr = self.bSizer1.return_value()
user = usr
if usr:
usr = "-usr " + usr
spc = self.bSizer2.return_value()
ncn = self.bSizer3.return_value()
loc = self.bSizer4.return_value()
location = loc
if loc:
loc = "-loc " + loc
ins = self.bSizer5.return_value()
#if ins:
# ins = "-ins " + ins
units = self.bSizer6.return_value()
if units:
units = 'cgs'
else:
units = 'SI'
bak = ''
backfield_curve = False
if self.bSizer7.return_value():
bak = "-bak"
backfield_curve = True
magicoutfile=os.path.split(infile)[1]+".magic"
SPEC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_specimens.txt"
SAMP_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_samples.txt"
SITE_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_sites.txt"
LOC_OUTFILE = magicoutfile[:magicoutfile.find('.')] + "_locations.txt"
options_dict['meas_outfile'] = outfile
options_dict['agm_file'] = infile
options_dict['spec_outfile'] = SPEC_OUTFILE
options_dict['samp_outfile'] = SAMP_OUTFILE
options_dict['site_outfile'] = SITE_OUTFILE
options_dict['loc_outfile'] = LOC_OUTFILE
options_dict['specnum'] =spc
COMMAND = "agm_magic.py -WD {} -ID {} -f {} -F {} -Fsp {} {} -spc {} -ncn {} {} {} -u {} {}".format(WD, ID, infile, outfile, spec_outfile, usr, spc, ncn, loc, ins, units, bak)
samp_infile = None
print("COMMAND: ",COMMAND)
if convert.agm(**options_dict):
pw.close_window(self,COMMAND,outfile)
pw.simple_warning('You have created the following files: {}\nMake sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!'.format(outfile))
else:
pw.simple_warning()
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=agm_magic.do_help())
class ImportAgmFolder(wx.Frame):
title = "Import folder of Micromag agm files"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='agm_directory')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Folder of agm files"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_dir(pnl, 'add', method = self.on_add_dir_button)
#---sizer 0a ----
text = "Note on input directory:\nAll file names must be SPECIMEN_NAME.AGM for hysteresis\nor SPECIMEN_NAME.IRM for backfield (case insensitive)"
self.bSizer0a = pw.simple_text(pnl, text)
#---sizer 1 ----
self.bSizer1 = pw.labeled_text_field(pnl)
#---sizer 2 ----
self.bSizer2 = pw.specimen_n(pnl)
#---sizer 3 ---
self.bSizer3 = pw.select_ncn(pnl)
#---sizer 4 ---
self.bSizer4 = pw.labeled_text_field(pnl, label="Location name:")
#---sizer 5 ---
self.bSizer5 = pw.labeled_text_field(pnl, label="Instrument name (optional):")
#---sizer 6---
self.bSizer6 = pw.labeled_yes_or_no(pnl, "Units", "CGS units (default)", "SI units")
#---sizer 7---
self.bSizer7 = pw.labeled_yes_or_no(pnl, "Format", "New (default)", "Old")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox1.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
hbox2.Add(self.bSizer6, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox2.Add(self.bSizer7, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
#
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_dir_button(self,event):
text = "choose directory of files to convert to MagIC"
pw.on_add_dir_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
WD = self.WD
ID = self.bSizer0.return_value()
files = os.listdir(ID)
files = [str(f) for f in files if str(f).endswith('.agm') or str(f).endswith('.irm')]
usr = self.bSizer1.return_value()
#if usr:
# usr = "-usr " + usr
spc = self.bSizer2.return_value()
ncn = self.bSizer3.return_value()
loc_name = self.bSizer4.return_value()
if loc_name:
loc = "-loc " + loc_name
else:
loc=""
ins = self.bSizer5.return_value()
#if ins:
# ins = "-ins " + ins
units = self.bSizer6.return_value()
if units:
units = 'cgs'
else:
units = 'SI'
fmt = self.bSizer7.return_value()
if fmt:
fmt = "new"
else:
fmt = "old"
# loop through all .agm and .irm files
warn = False
outfiles = []
for f in files:
if f.endswith('.irm'):
bak = "-bak"
bak_curve = True
else:
bak = ""
bak_curve = False
infile = os.path.join(ID, f)
outfile = f + ".magic"
outfiles.append(outfile)
stem = infile.split('.')[0]
SPEC_OUTFILE = stem + "_specimens.txt"
SAMP_OUTFILE = stem + "_samples.txt"
SITE_OUTFILE = stem + "_sites.txt"
LOC_OUTFILE = stem + "_locations.txt"
options_dict={}
options_dict['meas_outfile'] = outfile
options_dict['agm_file'] = infile
options_dict['spec_outfile'] = SPEC_OUTFILE
options_dict['samp_outfile'] = SAMP_OUTFILE
options_dict['site_outfile'] = SITE_OUTFILE
options_dict['fmt'] = fmt
COMMAND = "agm_magic.py -WD {} -ID {} -f {} -F {} -Fsp {} {} -spc {} -ncn {} {} {} -u {} {}".format(WD, ID, f, outfile, SPEC_OUTFILE, usr, spc, ncn, loc, ins, units, bak)
samp_infile = None
print("COMMAND: ",COMMAND)
print('options_dict', options_dict)
program_ran, error_msg = convert.agm(**options_dict)
if program_ran:
pass
#pw.close_window(self,COMMAND,outfile)
else:
warn = True
pw.simple_warning("Something went wrong.\n{}".format(error_msg))
if not warn:
ellipses = False
if len(outfiles) >= 8:
outfiles = outfiles[:8]
ellipses = True
pw.close_window(self,COMMAND,outfiles,ellipses)
pw.simple_warning('You have created MagIC files. Make sure to go to Pmag GUI step 1 to combine and rename them before proceeding to analysis or upload!')
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=agm_magic.do_help())
class ExportResults(wx.Frame):
title = "Extract results"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='export results')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Generate Excel or LaTeX files with site data"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add site file', method=self.on_add_file_button,
remove_button="Don't use site file")
res_file = os.path.join(self.WD, 'sites.txt')
self.check_and_add_file(res_file, self.bSizer0.file_path)
#---sizer 1 ----
self.bSizer1 = pw.choose_file(pnl, 'add criteria file', method=self.on_add_crit_button,
remove_button="Don't use criteria file")
crit_file = os.path.join(self.WD, 'criteria.txt')
self.check_and_add_file(crit_file, self.bSizer1.file_path)
#---sizer 2 ---
self.bSizer2 = pw.choose_file(pnl, 'add specimen file', method=self.on_add_spec_button,
remove_button="Don't use specimen file")
spec_file = os.path.join(self.WD, 'specimens.txt')
self.check_and_add_file(spec_file, self.bSizer2.file_path)
#---sizer 3 ---
#self.bSizer3 = pw.choose_file(pnl, 'add age file', method=self.on_add_age_button,
# remove_button="Don't use age file")
#age_file = os.path.join(self.WD, 'er_ages.txt')
#self.check_and_add_file(age_file, self.bSizer3.file_path)
#---sizer 4 ---
self.bSizer4 = pw.check_box(pnl, "output LaTeX-formatted files")
#---sizer 5 ---
#self.bSizer5 = pw.check_box(pnl, "grade specimens (only works with PmagPy generated specimen files")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
#hbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def check_and_add_file(self, infile, add_here):
if os.path.isfile(infile):
add_here.SetValue(infile)
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_add_crit_button(self, event):
text = "choose criteria file"
pw.on_add_file_button(self.bSizer1, text)
def on_add_spec_button(self, event):
text = "choose specimen file"
pw.on_add_file_button(self.bSizer2, text)
#def on_add_age_button(self, event):
# text = "choose age file"
# pw.on_add_file_button(self.bSizer3, text)
def on_okButton(self, event):
os.chdir(self.WD)
COMMAND = ""
print(COMMAND)
site_file = self.bSizer0.return_value()
#if site_file:
# site_file = os.path.split(site_file)[1]
crit_file = self.bSizer1.return_value()
#if crit_file:
# crit_file = os.path.split(crit_file)[1]
spec_file = self.bSizer2.return_value()
if (not site_file) and (not spec_file) and (not crit_file):
pw.simple_warning("You must provide at least one file to generate tables")
return
#if spec_file:
# spec_file = os.path.split(spec_file)[1]
#age_file = self.bSizer3.return_value()
#if age_file:
# age_file = os.path.split(age_file)[1]
latex = self.bSizer4.return_value()
#grade = self.bSizer5.return_value()
WD = self.WD
COMMAND = "ipmag.sites_extract(site_file='{}', output_dir_path='{}', latex='{}'".format(site_file, WD, latex)
print(COMMAND)
outfiles = []
if site_file:
res, outfiles = ipmag.sites_extract(site_file, output_dir_path=WD, latex=latex)
if res:
outfiles = [os.path.split(f)[1] for f in outfiles]
if spec_file:
res, files = ipmag.specimens_extract(spec_file, output_dir_path=WD, latex=latex)
if res:
outfiles.extend([os.path.split(f)[1] for f in files])
if crit_file:
res, files = ipmag.criteria_extract(crit_file, output_dir_path=WD, latex=latex)
if res:
outfiles.extend([os.path.split(f)[1] for f in files])
pw.close_window(self, COMMAND, ", ".join(outfiles))
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=ipmag.extract_sites.__doc__)
class ExportResults2(wx.Frame):
title = "Extract results"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Generate tab delimited text or LaTeX files with result data"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add result file', method=self.on_add_file_button)
res_file = os.path.join(self.WD, 'pmag_results.txt')
self.check_and_add_file(res_file, self.bSizer0.file_path)
#---sizer 1 ----
self.bSizer1 = pw.choose_file(pnl, 'add criteria file', method=self.on_add_crit_button,
remove_button="Don't use criteria file")
crit_file = os.path.join(self.WD, 'pmag_criteria.txt')
self.check_and_add_file(crit_file, self.bSizer1.file_path)
#---sizer 2 ---
self.bSizer2 = pw.choose_file(pnl, 'add specimen file', method=self.on_add_spec_button,
remove_button="Don't use specimen file")
spec_file = os.path.join(self.WD, 'pmag_specimens.txt')
self.check_and_add_file(spec_file, self.bSizer2.file_path)
#---sizer 3 ---
self.bSizer3 = pw.choose_file(pnl, 'add age file', method=self.on_add_age_button,
remove_button="Don't use age file")
age_file = os.path.join(self.WD, 'er_ages.txt')
self.check_and_add_file(age_file, self.bSizer3.file_path)
#---sizer 4 ---
self.bSizer4 = pw.check_box(pnl, "output LaTeX-formatted files")
#---sizer 5 ---
self.bSizer5 = pw.check_box(pnl, "grade specimens (only works with PmagPy generated specimen files")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
hbox.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def check_and_add_file(self, infile, add_here):
if os.path.isfile(infile):
add_here.SetValue(infile)
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_add_crit_button(self, event):
text = "choose criteria file"
pw.on_add_file_button(self.bSizer1, text)
def on_add_spec_button(self, event):
text = "choose specimen file"
pw.on_add_file_button(self.bSizer2, text)
def on_add_age_button(self, event):
text = "choose age file"
pw.on_add_file_button(self.bSizer3, text)
def on_okButton(self, event):
os.chdir(self.WD)
COMMAND = ""
print(COMMAND)
res_file = self.bSizer0.return_value()
if not os.path.isfile(res_file):
pw.simple_warning("You must have a result file to run this step")
return
res_file = os.path.split(res_file)[1]
crit_file = self.bSizer1.return_value()
if crit_file:
crit_file = os.path.split(crit_file)[1]
spec_file = self.bSizer2.return_value()
if spec_file:
spec_file = os.path.split(spec_file)[1]
age_file = self.bSizer3.return_value()
if age_file:
age_file = os.path.split(age_file)[1]
latex = self.bSizer4.return_value()
grade = self.bSizer5.return_value()
WD = self.WD
COMMAND = "ipmag.pmag_results_extract(res_file='{}', crit_file='{}', spec_file='{}', age_file='{}', latex='{}' grade='{}', WD='{}')".format(res_file, crit_file, spec_file, age_file, latex, grade, WD)
print(COMMAND)
res, outfiles = ipmag.pmag_results_extract(res_file, crit_file, spec_file, age_file,
latex, grade, WD)
outfiles = [os.path.split(f)[1] for f in outfiles]
pw.close_window(self, COMMAND, ", ".join(outfiles))
#pw.run_command_and_close_window(self, COMMAND, "er_samples.txt")
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=ipmag.pmag_results_extract.__doc__)
### Analysis and plots
class CustomizeCriteria(wx.Frame):
title = "Customize Criteria"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Update your criteria"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
choices = ['Use default criteria', 'Update default criteria', 'Use no criteria', 'Update existing criteria']
self.bSizer0 = pw.radio_buttons(pnl, choices)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#try:
# vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#except AttributeError:
# pass
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
choice = self.bSizer0.return_value()
critout = os.path.join(self.WD, 'pmag_criteria.txt')
if choice == 'Use default criteria' or choice == 'Use no criteria':
if choice == 'Use default criteria':
crit_data=pmag.default_criteria(0)
crit_data,critkeys=pmag.fillkeys(crit_data)
pmag.magic_write(critout,crit_data,'pmag_criteria')
# pop up instead of print
MSG="Default criteria saved in {}/pmag_criteria.txt".format(self.WD)
elif choice == 'Use no criteria':
crit_data = pmag.default_criteria(1)
pmag.magic_write(critout,crit_data,'pmag_criteria')
MSG="Extremely loose criteria saved in {}/pmag_criteria.txt".format(self.WD)
dia = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dia.ShowModal()
dia.Destroy()
self.Parent.Raise()
self.Destroy()
return
if choice == "Update existing criteria":
try:
crit_data, file_type = pmag.magic_read(os.path.join(self.WD, "pmag_criteria.txt"))
if file_type != "pmag_criteria":
raise Exception
except Exception as ex:
print("exception", ex)
MSG = "No pmag_criteria.txt file found in working directory ({})".format(self.WD)
dia = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
return 0
default_criteria = pmag.default_criteria(1)[0]
crit_data = crit_data[0]
crit_data = dict(default_criteria, **crit_data)
elif choice == "Update default criteria":
crit_data = pmag.default_criteria(0)[0]
frame = wx.Frame(self)
window = wx.ScrolledWindow(frame)
self.boxes = pw.large_checkbox_window(window, crit_data)
bSizer = wx.BoxSizer(wx.VERTICAL)
bSizer.Add(self.boxes)
hboxok = wx.BoxSizer(wx.HORIZONTAL)
edit_okButton = wx.Button(window, wx.ID_ANY, "&OK")
edit_cancelButton = wx.Button(window, wx.ID_ANY, '&Cancel')
hboxok.Add(edit_okButton, 0, wx.ALL, 5)
hboxok.Add(edit_cancelButton, 0, wx.ALL, 5 )
window.Bind(wx.EVT_BUTTON, self.on_cancelButton, edit_cancelButton)
window.Bind(wx.EVT_BUTTON, self.on_edit_okButton, edit_okButton)
bSizer.Add(hboxok)
window.SetSizer(bSizer)
bSizer.Fit(frame)
window.SetScrollbars(20, 20, 50, 50)
frame.Centre()
frame.Show()
def on_edit_okButton(self, event):
print(self.boxes.return_value())
crit_data = self.boxes.return_value()
        critout = os.path.join(self.WD, 'pmag_criteria.txt')
pmag.magic_write(critout, crit_data, 'pmag_criteria')
MSG = "pmag_criteria.txt file has been updated"
dia = wx.MessageDialog(None,caption="Message:", message=MSG ,style=wx.OK|wx.ICON_INFORMATION)
dia.ShowModal()
dia.Destroy()
self.on_cancelButton(None)
def on_cancelButton(self, event):
for child in self.GetChildren():
child.Destroy()
#child_window = child.GetWindow()
#print child_window
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
print("do help button")
# have a little info thing pop up
def add_thellier_gui_criteria(acceptance_criteria):
'''criteria used only in thellier gui
these criteria are not written to pmag_criteria.txt
'''
category="thellier_gui"
for crit in ['sample_int_n_outlier_check','site_int_n_outlier_check']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=-999
acceptance_criteria[crit]['threshold_type']="low"
acceptance_criteria[crit]['decimal_points']=0
for crit in ['sample_int_interval_uT','sample_int_interval_perc',\
'site_int_interval_uT','site_int_interval_perc',\
'sample_int_BS_68_uT','sample_int_BS_95_uT','sample_int_BS_68_perc','sample_int_BS_95_perc','specimen_int_max_slope_diff']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=-999
acceptance_criteria[crit]['threshold_type']="high"
if crit in ['specimen_int_max_slope_diff']:
acceptance_criteria[crit]['decimal_points']=-999
else:
acceptance_criteria[crit]['decimal_points']=1
acceptance_criteria[crit]['comments']="thellier_gui_only"
for crit in ['average_by_sample_or_site','interpreter_method']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
if crit in ['average_by_sample_or_site']:
acceptance_criteria[crit]['value']='sample'
if crit in ['interpreter_method']:
acceptance_criteria[crit]['value']='stdev_opt'
acceptance_criteria[crit]['threshold_type']="flag"
acceptance_criteria[crit]['decimal_points']=-999
for crit in ['include_nrm']:
acceptance_criteria[crit]={}
acceptance_criteria[crit]['category']=category
acceptance_criteria[crit]['criterion_name']=crit
acceptance_criteria[crit]['value']=True
acceptance_criteria[crit]['threshold_type']="bool"
acceptance_criteria[crit]['decimal_points']=-999
# define internal Thellier-GUI definitions:
#self.average_by_sample_or_site='sample'
#self.stdev_opt=True
#self.bs=False
#self.bs_par=False
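# Hedged usage sketch (added for illustration; it is not called anywhere in this
# module). It shows how add_thellier_gui_criteria mutates the dictionary passed to
# it. Starting from an empty dict is an assumption -- in PmagPy the dictionary
# would normally come from something like pmag.initialize_acceptance_criteria().
def _example_add_thellier_gui_criteria():
    acceptance_criteria = {}  # illustrative stand-in for the full criteria dict
    add_thellier_gui_criteria(acceptance_criteria)
    # the GUI-only entries now exist, e.g.:
    assert acceptance_criteria['interpreter_method']['value'] == 'stdev_opt'
    assert acceptance_criteria['include_nrm']['threshold_type'] == 'bool'
    return acceptance_criteria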
class ZeqMagic(wx.Frame):
title = "Zeq Magic"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "some text"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
#self.bSizer1 = pw.specimen_n(pnl)
#---sizer 2 ---
#self.bSizer2 = pw.select_ncn(pnl)
#---sizer 3 ---
#self.bSizer3 = pw.labeled_text_field(pnl, label="Location name:")
#---sizer 4 ---
#self.bSizer4 = pw.labeled_text_field(pnl, label="Instrument name (optional):")
#---sizer 4 ----
#try:
# open(self.WD + "/er_samples.txt", "r")
#except Exception as ex:
# er_samples_file_present = False
#if er_samples_file_present:
# self.bSizer4 = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
#hbox = wx.BoxSizer(wx.HORIZONTAL)
#hbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
#hbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#try:
# vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#except AttributeError:
# pass
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
COMMAND = "zeq_magic.py -WD {}".format(self.WD)
print(COMMAND)
#pw.run_command_and_close_window(self, COMMAND, "er_samples.txt")
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text='')
class Core_depthplot(wx.Frame):
title = "Remanence data vs. depth/height/age"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='core_depthplot')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "This program allows you to plot various measurement data versus sample depth.\nYou must provide either a magic_measurements file or a pmag_specimens file (or, you can use both)."
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, btn_text='add measurements file',
method=self.on_add_measurements_button,
remove_button="Don't use measurements file")
meas_file = os.path.join(self.WD, 'magic_measurements.txt')
self.check_and_add_file(meas_file, self.bSizer0.file_path)
#---sizer 4 ---
color_choices = ['blue', 'green','red','cyan','magenta', 'yellow', 'black','white']
self.bSizer4 = pw.radio_buttons(pnl, color_choices, "choose color for plot points")
#---sizer 5 ---
shape_choices = ['circle', 'triangle_down','triangle_up','square', 'star','hexagon','+','x','diamond']
shape_symbols =['o', 'v', '^', 's', '*', 'h', '+', 'x', 'd']
self.shape_choices_dict = dict(list(zip(shape_choices, shape_symbols)))
self.bSizer5 = pw.radio_buttons(pnl, shape_choices, "choose shape for plot points")
#---sizer 5a---
#self.bSizer5a = pw.labeled_text_field(pnl, "point size (default is 5)")
self.bSizer5a = pw.labeled_spin_ctrl(pnl, "point size (default is 5): ")
self.bSizer5b = pw.check_box(pnl, "Show lines connecting points")
self.bSizer5b.cb.SetValue(True)
self.Bind(wx.EVT_TEXT, self.change_file_path, self.bSizer0.file_path)
#---sizer 0a---
self.bSizer0a = pw.choose_file(pnl, btn_text='add pmag_specimens file', method = self.on_add_pmag_specimens_button, remove_button="Don't use pmag specimens file")
pmag_spec_file = os.path.join(self.WD, 'pmag_specimens.txt')
self.check_and_add_file(pmag_spec_file, self.bSizer0a.file_path)
#--- plotting stuff for pmag_specimens
self.bSizer0a1 = pw.radio_buttons(pnl, color_choices, "choose color for plot points")
# set default color to red
self.bSizer0a1.radio_buttons[2].SetValue(True)
self.bSizer0a2 = pw.radio_buttons(pnl, shape_choices, "choose shape for plot points")
# set default symbol:
self.bSizer0a2.radio_buttons[2].SetValue(True)
self.bSizer0a3 = pw.labeled_spin_ctrl(pnl, "point size (default is 5): ")
self.Bind(wx.EVT_TEXT, self.change_results_path, self.bSizer0a.file_path)
#---sizer 1 ---
self.bSizer1a = pw.labeled_yes_or_no(pnl, "Choose file to provide sample data", "er_samples", "er_ages")
self.Bind(wx.EVT_RADIOBUTTON, self.on_sample_or_age, self.bSizer1a.rb1)
self.Bind(wx.EVT_RADIOBUTTON, self.on_sample_or_age, self.bSizer1a.rb2)
self.bSizer1 = pw.choose_file(pnl, btn_text='add er_samples file', method = self.on_add_samples_button)
sampfile = os.path.join(self.WD, 'er_samples.txt')
self.check_and_add_file(sampfile, self.bSizer1.file_path)
#---sizer 2 ----
self.bSizer2 = pw.choose_file(pnl, btn_text='add IODP core summary csv file (optional)', method = self.on_add_csv_button)
#---sizer 3 ---
plot_choices = ['Plot declination', 'Plot inclination', 'Plot magnetization', 'Plot magnetization on log scale']
self.bSizer3 = pw.check_boxes(pnl, (5, 1, 0, 0), plot_choices, "Choose what to plot:")
self.bSizer3.boxes[0].SetValue(True)
self.bSizer3.boxes[1].SetValue(True)
self.bSizer3.boxes[2].SetValue(True)
self.bSizer3.boxes[3].SetValue(True)
#---sizer 13---
protocol_choices = ['AF', 'T', 'ARM', 'IRM']#, 'X'] not supporting susceptibility at the moment
self.bSizer13 = pw.radio_buttons(pnl, protocol_choices, "Lab Protocol: ", orientation=wx.HORIZONTAL)
self.bSizer14 = pw.labeled_text_field(pnl, "Step: ")
#self.bSizer15 = pw.check_box(pnl, "Do not plot blanket treatment data")
self.bSizer16 = pw.radio_buttons(pnl, ['svg', 'eps', 'pdf', 'png'], "Save plot in this format:")
#---sizer 8 ---
self.bSizer8 = pw.labeled_yes_or_no(pnl, "Depth scale", "Meters below sea floor (mbsf)", "Meters composite depth (mcd)")
#---sizer 6 ---
self.bSizer6 = pw.labeled_text_field(pnl, label="minimum depth to plot (in meters)")
#---sizer 7---
self.bSizer7 = pw.labeled_text_field(pnl, label="maximum depth to plot (in meters)")
#---sizer 9 ---
self.bSizer9 = pw.check_box(pnl, "Plot GPTS?")
self.Bind(wx.EVT_CHECKBOX, self.on_checkbox, self.bSizer9.cb)
# if plotting GPTS, these sizers will be shown:
#self.bSizer10 = pw.labeled_yes_or_no(pnl, "Time scale", "gts04", "ck95")
choices = ["gts12", "gts04", "ck95"]
self.bSizer10 = pw.radio_buttons(pnl, choices, label="Time scale")
self.bSizer11 = pw.labeled_text_field(pnl, label="Minimum age (in Ma)")
self.bSizer12 = pw.labeled_text_field(pnl, label="Maximum age (in Ma)")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
#---make all the smaller container boxes---
vbox = wx.BoxSizer(wx.VERTICAL)
box1 = wx.StaticBox(pnl)
box2 = wx.StaticBox(pnl)
box3 = wx.StaticBox(pnl)
box4 = wx.StaticBox(pnl)
box5 = wx.StaticBox(pnl)
self.vbox1 = wx.StaticBoxSizer(box1, wx.VERTICAL)
vbox2 = wx.StaticBoxSizer(box2, wx.VERTICAL)
vbox3 = wx.StaticBoxSizer(box3, wx.VERTICAL)
vbox4 = wx.StaticBoxSizer(box4, wx.VERTICAL)
self.vbox5 = wx.StaticBoxSizer(box5, wx.VERTICAL)
mini_vbox = wx.BoxSizer(wx.VERTICAL)
hbox0 = wx.BoxSizer(wx.HORIZONTAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
#---Plot type and format ---
hbox0.AddMany([self.bSizer3, self.bSizer16])
#---Plot display options---
mini_vbox.AddMany([self.bSizer5a, self.bSizer5b])
hbox1.Add(self.bSizer4)
hbox1.Add(self.bSizer5, flag=wx.ALIGN_LEFT)
hbox1.Add(mini_vbox, flag=wx.ALIGN_LEFT)
self.vbox1.Add(wx.StaticText(pnl, label="Plot display options for measurements data"))
self.vbox1.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
# more plot display options
hbox5.AddMany([self.bSizer0a1, self.bSizer0a2, self.bSizer0a3])
self.vbox5.Add(wx.StaticText(pnl, label="Plot display options for specimens data"))
self.vbox5.Add(hbox5)
#---depths to plot ---
hbox2.Add(self.bSizer6, flag=wx.ALIGN_LEFT)#|wx.LEFT, border=5)
hbox2.Add(self.bSizer7, flag=wx.ALIGN_LEFT)
vbox2.Add(wx.StaticText(pnl, label="Specify depths to plot (optional)"))
vbox2.Add(hbox2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#---time scale ----
hbox3.Add(self.bSizer9, flag=wx.ALIGN_LEFT)
hbox3.Add(self.bSizer10, flag=wx.ALIGN_LEFT)#|wx.LEFT, border=5)
hbox3.Add(self.bSizer11, flag=wx.ALIGN_LEFT)
hbox3.Add(self.bSizer12, flag=wx.ALIGN_LEFT)
vbox3.Add(wx.StaticText(pnl, label="Specify time scale to plot (optional)"))
vbox3.Add(hbox3)
#---experiment type and step
hbox4.Add(self.bSizer13, flag=wx.ALIGN_LEFT)
hbox4.Add(self.bSizer14, flag=wx.ALIGN_LEFT)
vbox4.Add(wx.StaticText(pnl, label="Experiment type"))
vbox4.Add(hbox4)
#vbox4.Add(self.bSizer15)
#---add all widgets to main container---
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.vbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.vbox5, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox0)
vbox.Add(vbox4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer8, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(vbox2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(vbox3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#--- add buttons ---
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.hbox_all = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_all.AddSpacer(20)
self.hbox_all.Add(vbox)
self.panel.SetSizer(self.hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
# hide plotting stuff
# no longer hiding this initially -- it causes a sizing nightmare
#if not self.bSizer0.file_path.GetValue():
#self.vbox1.ShowItems(False)
#if not self.bSizer0a.file_path.GetValue():
#self.vbox5.ShowItems(False)
self.hbox_all.Fit(self)
# hide gpts stuff
self.bSizer10.ShowItems(False)
self.bSizer11.ShowItems(False)
self.bSizer12.ShowItems(False)
self.Show()
self.Centre()
def change_results_path(self, event):
txt_ctrl = event.GetEventObject()
if txt_ctrl.GetValue():
self.vbox5.ShowItems(True)
self.panel.Layout() # resizes scrolled window
#self.hbox_all.Fit(self) # resizes entire frame
else:
self.vbox5.ShowItems(False)
self.panel.Layout()
def change_file_path(self, event):
txt_ctrl = event.GetEventObject()
if txt_ctrl.GetValue():
self.vbox1.ShowItems(True)
self.panel.Layout() # resizes scrolled window
#self.hbox_all.Fit(self) # resizes entire frame
else:
self.vbox1.ShowItems(False)
self.panel.Layout()
def on_add_measurements_button(self, event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_add_pmag_specimens_button(self, event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0a, text)
def on_sample_or_age(self, event):
if event.GetId() == self.bSizer1a.rb1.GetId():
self.bSizer1.add_file_button.SetLabel('add er_samples_file')
self.check_and_add_file(os.path.join(self.WD, 'er_samples.txt'), self.bSizer1.file_path)
else:
self.bSizer1.add_file_button.SetLabel('add er_ages_file')
self.check_and_add_file(os.path.join(self.WD, 'er_ages.txt'), self.bSizer1.file_path)
def check_and_add_file(self, infile, add_here):
if os.path.isfile(infile):
add_here.SetValue(infile)
def on_add_samples_button(self, event):
text = "provide er_samples/er_ages file"
pw.on_add_file_button(self.bSizer1, text)
def on_add_csv_button(self, event):
text = "provide csv file (optional)"
pw.on_add_file_button(self.bSizer2, text)
def on_checkbox(self, event):
if event.Checked():
self.bSizer10.ShowItems(True)
self.bSizer11.ShowItems(True)
self.bSizer12.ShowItems(True)
else:
self.bSizer10.ShowItems(False)
self.bSizer11.ShowItems(False)
self.bSizer12.ShowItems(False)
self.panel.Layout()
#self.hbox_all.Fit(self)
def on_okButton(self, event):
"""
meas_file # -f magic_measurements_file
samp_file #-fsa er_samples_file
age_file # -fa er_ages_file
depth_scale # -ds scale
dmin, dmax # -d 1 50 # depth to plot
timescale, amin, amax (also sets pTS, pcol, width) = # -ts scale min max
sym, size # -sym symbol size
method, step (also may set suc_key) # -LP protocol step
pltDec (also sets pcol, pel, width)# -D (don't plot dec)
pltInc (also sets pcol, pel, width)# -I (don't plot inc)
pltMag (also sets pcol, pel, width)# -M (don't plot intensity)
logit # -log ( plot log scale)
fmt # -fmt format
"""
        def check_input_dir_path(input_dir_path, new_dir_path):
            """Ensure all selected input files come from a single directory.
            Returns the shared directory path, or False (after warning the user)
            if two files come from different directories."""
            if input_dir_path and input_dir_path != new_dir_path:
                pw.simple_warning("Please make sure that all input files come from the same directory")
                return False
            if not input_dir_path and new_dir_path:
                return new_dir_path
            elif input_dir_path == new_dir_path:
                return input_dir_path
wait = wx.BusyInfo('Making plots, please wait...')
wx.SafeYield()
os.chdir(self.WD)
input_dir_path = None
meas_file = self.bSizer0.return_value()
if meas_file:
input_dir_path, meas_file = os.path.split(meas_file)
pmag_spec_file = self.bSizer0a.return_value()
if pmag_spec_file:
new_dir_path, pmag_spec_file = os.path.split(pmag_spec_file)
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
sum_file = self.bSizer2.return_value()
if sum_file:
new_dir_path, sum_file = os.path.split(sum_file)
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
spec_sym, spec_sym_shape, spec_sym_color, spec_sym_size = "", "", "", ""
if pmag_spec_file:
# get symbol/size for dots
spec_sym_shape = self.shape_choices_dict[self.bSizer0a2.return_value()]
spec_sym_color = self.bSizer0a1.return_value()[0]
spec_sym_size = self.bSizer0a3.return_value()
spec_sym = str(spec_sym_color) + str(spec_sym_shape)
use_sampfile = self.bSizer1a.return_value()
if use_sampfile:
new_dir_path, samp_file = os.path.split(str(self.bSizer1.return_value()))
age_file = ''
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
else:
samp_file = ''
new_dir_path, age_file = os.path.split(self.bSizer1.return_value())
input_dir_path = check_input_dir_path(input_dir_path, new_dir_path)
if not input_dir_path:
del wait
return False
depth_scale = self.bSizer8.return_value()
if age_file:
depth_scale='age'
elif depth_scale:
depth_scale = 'sample_core_depth' #'mbsf'
else:
depth_scale = 'sample_composite_depth' #'mcd'
dmin = self.bSizer6.return_value()
dmax = self.bSizer7.return_value()
if self.bSizer9.return_value(): # if plot GPTS is checked
pltTime = 1
timescale = self.bSizer10.return_value()
amin = self.bSizer11.return_value()
amax = self.bSizer12.return_value()
if not amin or not amax:
del wait
pw.simple_warning("If plotting timescale, you must provide both a lower and an upper bound.\nIf you don't want to plot timescale, uncheck the 'Plot GPTS' checkbox")
return False
else: # if plot GPTS is not checked
pltTime, timescale, amin, amax = 0, '', -1, -1
sym_shape = self.shape_choices_dict[self.bSizer5.return_value()]
sym_color = self.bSizer4.return_value()[0]
sym = sym_color + sym_shape
size = self.bSizer5a.return_value()
pltLine = self.bSizer5b.return_value()
if pltLine:
pltLine = 1
else:
pltLine = 0
method = str(self.bSizer13.return_value())
step = self.bSizer14.return_value()
if not step:
step = 0
method = 'LT-NO'
#if not step:
# #-LP [AF,T,ARM,IRM, X] step [in mT,C,mT,mT, mass/vol] to plot
# units_dict = {'AF': 'millitesla', 'T': 'degrees C', 'ARM': 'millitesla', 'IRM': 'millitesla', 'X': 'mass/vol'}
#unit = units_dict[method]
#pw.simple_warning("You must provide the experiment step in {}".format(unit))
#return False
pltDec, pltInc, pltMag, logit = 0, 0, 0, 0
for val in self.bSizer3.return_value():
if 'declination' in val:
pltDec = 1
if 'inclination' in val:
pltInc = 1
if 'magnetization' in val:
pltMag = 1
if 'log' in val:
logit = 1
#pltSus = self.bSizer15.return_value()
#if pltSus:
# pltSus = 0
#else:
# pltSus = 1
fmt = self.bSizer16.return_value()
#print "meas_file", meas_file, "pmag_spec_file", pmag_spec_file, "spec_sym_shape", spec_sym_shape, "spec_sym_color", spec_sym_color, "spec_sym_size", spec_sym_size, "samp_file", samp_file, "age_file", age_file, "depth_scale", depth_scale, "dmin", dmin, "dmax", dmax, "timescale", timescale, "amin", amin, "amax", amax, "sym", sym, "size", size, "method", method, "step", step, "pltDec", pltDec, "pltInc", pltInc, "pltMag", pltMag, "pltTime", pltTime, "logit", logit, "fmt", fmt
# for use as module:
#print "pltLine:", pltLine
#print "pltSus:", pltSus
fig, figname = ipmag.core_depthplot(input_dir_path or self.WD, meas_file, pmag_spec_file, samp_file, age_file, sum_file, '', depth_scale, dmin, dmax, sym, size, spec_sym, spec_sym_size, method, step, fmt, pltDec, pltInc, pltMag, pltLine, 1, logit, pltTime, timescale, amin, amax)
if fig:
self.Destroy()
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
plot_frame = PlotFrame((pixel_width, pixel_height + 50), fig, figname)
del wait
return plot_frame
else:
del wait
pw.simple_warning("No data points met your criteria - try again\nError message: {}".format(figname))
return False
# for use as command_line:
if meas_file:
meas_file = os.path.split(meas_file)[1]
meas_file = pmag.add_flag(meas_file, '-f')
if pmag_spec_file:
pmag_spec_file = os.path.split(pmag_spec_file)[1]
pmag_spec_file = pmag.add_flag(pmag_spec_file, '-fsp')
pmag_spec_file = pmag_spec_file + ' ' + spec_sym_color + spec_sym_shape + ' ' + str(spec_sym_size)
sym = '-sym ' + sym + ' ' + str(size)
if samp_file:
samp_file = os.path.split(samp_file)[1]
samp_file = pmag.add_flag(samp_file, '-fsa')
if age_file:
age_file = os.path.split(age_file)[1]
age_file = pmag.add_flag(age_file, '-fa')
depth_scale = pmag.add_flag(depth_scale, '-ds')
depth_range = ''
if dmin and dmax:
depth_range = '-d ' + str(dmin) + ' ' + str(dmax)
if pltTime and amin and amax:
timescale = '-ts ' + timescale + ' ' + str(amin) + ' ' + str(amax)
else:
timescale = ''
method = pmag.add_flag(method, '-LP') + ' ' + str(step)
#if not pltSus:
# pltSus = "-L"
#else:
# pltSus = ''
if not pltDec:
pltDec = "-D"
else:
pltDec = ''
if not pltInc:
pltInc = "-I"
else:
pltInc = ''
if not pltMag:
pltMag = "-M"
else:
pltMag = ''
if pltLine:
pltLine = ""
else:
pltLine = '-L' # suppress line
if logit:
logit = "-log"
else:
logit = ''
fmt = pmag.add_flag(fmt, '-fmt')
COMMAND = "core_depthplot.py {meas_file} {pmag_spec_file} {sym} {samp_file} {age_file} {depth_scale} {depth_range} {timescale} {method} {pltDec} {pltInc} {pltMag} {logit} {fmt} {pltLine} -WD {WD}".format(meas_file=meas_file, pmag_spec_file=pmag_spec_file, sym=sym, samp_file=samp_file, age_file=age_file, depth_scale=depth_scale, depth_range=depth_range, timescale=timescale, method=method, pltDec=pltDec, pltInc=pltInc, pltMag=pltMag, logit=logit, fmt=fmt, pltLine=pltLine, WD=self.WD)
print(COMMAND)
#os.system(COMMAND)
"""
haven't done these options yet
wt_file (also sets norm)# -n specimen_filename
spc_file, spc_sym, spc_size # -fsp spec_file symbol_shape symbol_size
res_file, res_sym, res_size # -fres pmag_results_file symbol_shape symbol_size
wig_file (also sets pcol, width) # -fwig wiggle_file(???)
sum_file # -fsum IODP_core_summary_csv_file
(sets plots & verbose) # -sav
"""
#pw.run_command_and_close_window(self, COMMAND, "er_samples.txt")
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=ipmag.core_depthplot.__doc__)
class Ani_depthplot(wx.Frame):
title = "Plot anisotropoy vs. depth/height/age"
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title, name='aniso_depthplot')
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "Anisotropy data can be plotted versus depth.\nThe program ANI_depthplot.py uses MagIC formatted data tables of the rmag_anisotropy.txt and er_samples.txt types.\nrmag_anisotropy.txt stores the tensor elements and measurement meta-data while er_samples.txt stores the depths, location and other information.\nBulk susceptibility measurements can also be plotted if they are available in a magic_measurements.txt formatted file."
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, btn_text='add rmag_anisotropy file', method = self.on_add_rmag_button, remove_button='remove rmag_anisotropy file')
self.check_and_add_file(os.path.join(self.WD, 'rmag_anisotropy.txt'), self.bSizer0.file_path)
#---sizer 1 ----
self.bSizer1 = pw.choose_file(pnl, btn_text='add magic_measurements file', method = self.on_add_measurements_button, remove_button='remove magic_measurements file')
self.check_and_add_file(os.path.join(self.WD, 'magic_measurements.txt'), self.bSizer1.file_path)
#---sizer 2 ---
self.bSizer2a = pw.labeled_yes_or_no(pnl, "Choose file to provide sample data", "er_samples", "er_ages")
self.Bind(wx.EVT_RADIOBUTTON, self.on_sample_or_age, self.bSizer2a.rb1)
self.Bind(wx.EVT_RADIOBUTTON, self.on_sample_or_age, self.bSizer2a.rb2)
self.bSizer2 = pw.choose_file(pnl, btn_text='add er_samples file', method = self.on_add_samples_button)
sampfile = os.path.join(self.WD, 'er_samples.txt')
self.check_and_add_file(sampfile, self.bSizer2.file_path)
#---sizer 2b---
self.bSizer2b = pw.choose_file(pnl, btn_text="Add core summary file (optional)", method = self.on_add_summary_button)
#---sizer 3---
self.bSizer3 = pw.radio_buttons(pnl, ['svg', 'eps', 'pdf', 'png'], "Save plot in this format:")
#---sizer 4 ---
self.bSizer4 = pw.labeled_yes_or_no(pnl, "Depth scale", "Meters below sea floor (mbsf)", "Meters composite depth (mcd)")
#---sizer 5 ---
self.bSizer5 = pw.labeled_text_field(pnl, label="minimum depth to plot (in meters)")
#---sizer 6---
self.bSizer6 = pw.labeled_text_field(pnl, label="maximum depth to plot (in meters)")
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2a, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2b, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.AddMany([self.bSizer5, self.bSizer6])
vbox.Add(hbox1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_rmag_button(self,event):
text = "choose rmag_anisotropy file"
pw.on_add_file_button(self.bSizer0, text)
def on_add_measurements_button(self,event):
text = "choose magic_measurements file"
pw.on_add_file_button(self.bSizer1, text)
def on_add_samples_button(self, event):
text = "provide er_samples/er_ages file"
pw.on_add_file_button(self.bSizer2, text)
def on_sample_or_age(self, event):
if event.GetId() == self.bSizer2a.rb1.GetId():
self.bSizer2.add_file_button.SetLabel('add er_samples_file')
self.check_and_add_file(os.path.join(self.WD, 'er_samples.txt'), self.bSizer2.file_path)
else:
self.bSizer2.add_file_button.SetLabel('add er_ages_file')
self.check_and_add_file(os.path.join(self.WD, 'er_ages.txt'), self.bSizer2.file_path)
def on_add_summary_button(self, event):
pw.on_add_file_button(self.bSizer2b, text="provide csv format core summary file")
def check_and_add_file(self, infile, add_here):
if os.path.isfile(infile):
add_here.SetValue(infile)
def on_okButton(self, event):
wait = wx.BusyInfo('Making plots, please wait...')
wx.SafeYield()
os.chdir(self.WD)
ani_file = self.bSizer0.return_value()
meas_file = self.bSizer1.return_value()
use_sampfile = self.bSizer2a.return_value()
samp_file, age_file = None, None
if use_sampfile:
samp_file = self.bSizer2.return_value()
else:
age_file = self.bSizer2.return_value()
sum_file = self.bSizer2b.return_value()
if sum_file:
sum_file = os.path.split(sum_file)[1]
fmt = self.bSizer3.return_value()
depth_scale = self.bSizer4.return_value()
print('age_file', age_file)
if age_file:
depth_scale='age'
elif depth_scale:
depth_scale = 'sample_core_depth' #'mbsf'
else:
depth_scale = 'sample_composite_depth' #'mcd'
dmin = self.bSizer5.return_value() or -1
dmax = self.bSizer6.return_value() or -1
# for use as module:
fig, figname = ipmag.ani_depthplot2(ani_file, meas_file, samp_file, age_file, sum_file, fmt, float(dmin), float(dmax), depth_scale)
if fig:
self.Destroy()
dpi = fig.get_dpi()
pixel_width = dpi * fig.get_figwidth()
pixel_height = dpi * fig.get_figheight()
del wait
plot_frame = PlotFrame((pixel_width, pixel_height + 50), fig, figname)
else:
del wait
pw.simple_warning("No data points met your criteria - try again\nError message: {}".format(figname))
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text=ipmag.ani_depthplot2.__doc__)
class something(wx.Frame):
title = ""
def __init__(self, parent, WD):
wx.Frame.__init__(self, parent, wx.ID_ANY, self.title)
self.panel = wx.ScrolledWindow(self)
self.WD = WD
self.InitUI()
def InitUI(self):
pnl = self.panel
TEXT = "some text"
bSizer_info = wx.BoxSizer(wx.HORIZONTAL)
bSizer_info.Add(wx.StaticText(pnl, label=TEXT), wx.ALIGN_LEFT)
#---sizer 0 ----
self.bSizer0 = pw.choose_file(pnl, 'add', method = self.on_add_file_button)
#---sizer 1 ----
self.bSizer1 = pw.specimen_n(pnl)
#---sizer 2 ---
self.bSizer2 = pw.select_ncn(pnl)
#---sizer 3 ---
self.bSizer3 = pw.labeled_text_field(pnl, label="Location name:")
#---sizer 4 ---
#---sizer 4 ----
#try:
# open(self.WD + "/er_samples.txt", "r")
#except Exception as ex:
# er_samples_file_present = False
#if er_samples_file_present:
# self.bSizer4 = pw.labeled_yes_or_no(pnl, TEXT, label1, label2)
#---buttons ---
hboxok = pw.btn_panel(self, pnl)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.LEFT, border=5)
        #hbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT)  # bSizer4 is not defined in this template class
vbox.Add(bSizer_info, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer0, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer1, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(self.bSizer2, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
vbox.Add(hbox, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#vbox.Add(self.bSizer3, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#try:
# vbox.Add(self.bSizer4, flag=wx.ALIGN_LEFT|wx.TOP, border=10)
#except AttributeError:
# pass
vbox.Add(hboxok, flag=wx.ALIGN_CENTER)
vbox.AddSpacer(20)
hbox_all = wx.BoxSizer(wx.HORIZONTAL)
hbox_all.AddSpacer(20)
hbox_all.Add(vbox)
self.panel.SetSizer(hbox_all)
self.panel.SetScrollbars(20, 20, 50, 50)
hbox_all.Fit(self)
self.Show()
self.Centre()
def on_add_file_button(self,event):
text = "choose file to convert to MagIC"
pw.on_add_file_button(self.bSizer0, text)
def on_okButton(self, event):
os.chdir(self.WD)
COMMAND = ""
print(COMMAND)
#pw.run_command_and_close_window(self, COMMAND, "er_samples.txt")
def on_cancelButton(self,event):
self.Destroy()
self.Parent.Raise()
def on_helpButton(self, event):
pw.on_helpButton(text='')
# File
class ClearWD(wx.MessageDialog):
def __init__(self, parent, WD):
msg = "Are you sure you want to delete the contents of:\n{} ?\nThis action cannot be undone".format(WD)
super(ClearWD, self).__init__(None, caption="Not so fast", message=msg, style=wx.YES_NO|wx.NO_DEFAULT|wx.ICON_EXCLAMATION)
self.WD = WD
def do_clear(self):
result = self.ShowModal()
self.Destroy()
if result == wx.ID_YES:
os.chdir('..')
import shutil
shutil.rmtree(self.WD)
os.mkdir(self.WD)
os.chdir(self.WD)
return True
else:
print("{} has not been emptied".format(self.WD))
return False
#consider using this instead (will preserve permissions of directory, but this may or may not be crucial)
#def emptydir(top):
# if(top == '/' or top == "\\"): return
# else:
# for root, dirs, files in os.walk(top, topdown=False):
# for name in files:
# os.remove(os.path.join(root, name))
# for name in dirs:
# os.rmdir(os.path.join(root, name))
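# Hedged sketch (added for illustration): a runnable version of the emptydir
# alternative outlined in the comments above. Unlike shutil.rmtree + os.mkdir it
# leaves the top-level directory (and its permissions) in place; it is not wired
# into ClearWD, and whether preserving permissions matters here is an assumption.
def emptydir(top):
    """Delete the contents of `top` without removing `top` itself."""
    if top in ('/', '\\'):  # refuse to empty a filesystem root
        return
    for root, dirs, files in os.walk(top, topdown=False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            os.rmdir(os.path.join(root, name))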
class PlotFrame(wx.Frame):
def __init__(self, size, figure, figname, standalone=False):
super(PlotFrame, self).__init__(None, -1, size=size)
self.figure = figure
self.figname = figname
self.standalone = standalone
panel = wx.Panel(self, -1)
canvas = FigureCanvas(panel, -1, self.figure)
btn_panel = self.make_btn_panel(panel)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW) # having/removing wx.GROW doesn't matter
sizer.Add(btn_panel, flag=wx.CENTRE|wx.ALL, border=5)
panel.SetSizer(sizer)
sizer.Fit(panel) # MIGHT HAVE TO TAKE THIS LINE OUT!!!
self.Centre()
self.Show()
def make_btn_panel(self, parent):
hbox = wx.BoxSizer(wx.HORIZONTAL)
btn_save = wx.Button(parent, -1, "Save plot")
self.Bind(wx.EVT_BUTTON, self.on_save, btn_save)
btn_discard = wx.Button(parent, -1, "Discard plot")
self.Bind(wx.EVT_BUTTON, self.on_discard, btn_discard)
hbox.AddMany([(btn_save, 1, wx.RIGHT, 5), (btn_discard)])
return hbox
def on_save(self, event):
plt.savefig(self.figname)
plt.clf() # clear figure
dir_path, figname = os.path.split(self.figname)
if not dir_path:
dir_path = os.getcwd()
dir_path = os.path.abspath(dir_path)
dlg = wx.MessageDialog(None, message="Plot saved in directory:\n{}\nas {}".format(dir_path, figname), style=wx.OK)
dlg.ShowModal()
dlg.Destroy()
self.Destroy()
if self.standalone:
sys.exit()
def on_discard(self, event):
dlg = wx.MessageDialog(self, "Are you sure you want to delete this plot?", "Not so fast", style=wx.YES_NO|wx.NO_DEFAULT|wx.ICON_EXCLAMATION)
response = dlg.ShowModal()
if response == wx.ID_YES:
plt.clf() # clear figure
dlg.Destroy()
self.Destroy()
if self.standalone:
sys.exit()
| bsd-3-clause |
percyfal/snakemakelib-core | snakemakelib/odo/picard.py | 1 | 2797 | # Copyright (C) 2015 by Per Unneberg
from blaze import resource, DataFrame
import pandas as pd
import re
from .pandas import annotate_by_uri
from snakemakelib.log import LoggerManager
smllogger = LoggerManager().getLogger(__name__)
def _hist_reader(uri):
with open(uri) as fh:
data = [x.strip("\n").split("\t") for x in fh
if not x.strip() == ""]
indices = list((i for i, val in enumerate(data)
if val[0].startswith("## METRICS CLASS")
or val[0].startswith("## HISTOGRAM")))
if len(indices) == 1:
indices.append(len(data))
metrics = DataFrame.from_records(data[(indices[0]+2):(indices[1])],
columns=data[(indices[0]+1)])
# We could be missing the histogram
try:
hist = DataFrame.from_records(data[(indices[1]+2):],
columns = data[(indices[1]+1)])
    except Exception:
smllogger.warn("No histogram data for {}".format(uri))
hist = None
return (metrics, hist)
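# Hedged illustration (added): the layout _hist_reader expects. Only the
# "## METRICS CLASS" and "## HISTOGRAM" marker lines matter to the parser; the
# column names and values below are assumptions sketching a typical Picard-style,
# tab-delimited metrics file:
#
#   ## METRICS CLASS    picard.analysis.InsertSizeMetrics
#   MEDIAN_INSERT_SIZE  MEAN_INSERT_SIZE
#   312                 315.4
#
#   ## HISTOGRAM    java.lang.Integer
#   insert_size     All_Reads.fr_count
#   100             12
#   101             18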
def _reader(uri):
with open(uri) as fh:
data = [x.strip("\n").split("\t") for x in fh
if not x.strip() == ""]
indices = list((i for i, val in enumerate(data)
if val[0].startswith("## METRICS CLASS")))
metrics = DataFrame.from_records(data[(indices[0]+2):],
columns=data[(indices[0]+1)],
index="CATEGORY")
return (metrics, None)
@resource.register('.+\.align_metrics')
@annotate_by_uri
def resource_align_metrics(uri, **kwargs):
metrics, _ = _reader(uri)
metrics = metrics.apply(pd.to_numeric, axis=1)
return metrics
@resource.register('.+\.insert_metrics')
@annotate_by_uri
def resource_insert_metrics(uri, key="metrics", **kwargs):
(_metrics, hist) = _hist_reader(uri)
metrics = _metrics[_metrics.columns.difference(["PAIR_ORIENTATION"])].apply(pd.to_numeric, axis=0)
metrics["PAIR_ORIENTATION"] = _metrics["PAIR_ORIENTATION"]
hist = hist.apply(pd.to_numeric, axis=0)
if key == "metrics":
return metrics
elif key == "hist":
return hist
@resource.register('.+\.hs_metrics')
@annotate_by_uri
def resource_hs_metrics(uri, **kwargs):
return _hist_reader(uri)
@resource.register('.+\.dup_metrics')
@annotate_by_uri
def resource_dup_metrics(uri, key="metrics", **kwargs):
(_metrics, hist) = _hist_reader(uri)
metrics = _metrics[_metrics.columns.difference(["LIBRARY"])].apply(pd.to_numeric, axis=0)
    if hist is not None:
hist = hist.apply(pd.to_numeric, axis=0)
if key == "metrics":
return metrics
elif key == "hist":
return hist
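# Hedged usage sketch (added for illustration): the functions above are registered
# as odo/blaze `resource` handlers keyed on the file suffix, so loading a metrics
# file into a DataFrame should look roughly like the commented snippet below. The
# file name is hypothetical; any path matching one of the registered regexes would
# dispatch the same way.
#
#   from blaze import resource
#   metrics = resource("sample.insert_metrics", key="metrics")
#   hist = resource("sample.insert_metrics", key="hist")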
| mit |
huzq/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 19 | 17638 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import fails_if_pypy
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
pytestmark = fails_if_pypy
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert X.indptr.shape[0] == 7
assert X.shape[0] == 6
assert X.shape[1] == 21
assert y.shape[0] == 6
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert X[i, j] == val
# tests X's zero values
assert X[0, 3] == 0
assert X[0, 5] == 0
assert X[1, 8] == 0
assert X[1, 16] == 0
assert X[2, 18] == 0
# test can change X's values
X[0, 2] *= 2
assert X[0, 2] == 5
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_almost_equal(X1.data, X2.data)
assert_array_almost_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert y == [(0, 1), (2,), (), (1, 2)]
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_almost_equal(y_train, y_test)
assert X_train.dtype == np.float32
assert X_test.dtype == np.float32
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert X1.dtype == X2.dtype
assert X2.dtype == X3.dtype
assert X3.dtype == np.float64
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert X.indptr.shape[0] == 7
assert X.shape[0] == 6
assert X.shape[1] == 22
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert X[i, j] == val
# 21 features in file
with pytest.raises(ValueError):
load_svmlight_file(datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
with gzip.open(tmp.name, "wb") as fh_out:
shutil.copyfileobj(f, fh_out)
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xgz.toarray())
assert_array_almost_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
with BZ2File(tmp.name, "wb") as fh_out:
shutil.copyfileobj(f, fh_out)
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_almost_equal(X.toarray(), Xbz.toarray())
assert_array_almost_equal(y, ybz)
def test_load_invalid_file():
with pytest.raises(ValueError):
load_svmlight_file(invalidfile)
def test_load_invalid_order_file():
with pytest.raises(ValueError):
load_svmlight_file(invalidfile2)
def test_load_zero_based():
f = BytesIO(b"-1 4:1.\n1 0:1\n")
with pytest.raises(ValueError):
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b"-1 1:1 2:2 3:3\n"
data2 = b"-1 0:0 1:1\n"
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert X.shape == (1, 3)
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert X1.shape == (1, 4)
assert X2.shape == (1, 4)
def test_load_with_qid():
# load svmfile with qid attribute
data = b"""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12"""
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@pytest.mark.skip("testing the overflow of 32 bit sparse indexing requires a"
" large amount of memory")
def test_load_large_qid():
"""
load large libsvm / svmlight file with qid attribute. Tests 64-bit query ID
"""
data = b"\n".join(("3 qid:{0} 1:0.53 2:0.12\n2 qid:{0} 1:0.13 2:0.1"
.format(i).encode() for i in range(1, 40*1000*1000)))
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
assert_array_equal(y[-4:], [3, 2, 3, 2])
assert_array_equal(np.unique(qid), np.arange(1, 40*1000*1000))
def test_load_invalid_file2():
with pytest.raises(ValueError):
load_svmlight_files([datafile, invalidfile, datafile])
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
with pytest.raises(TypeError):
load_svmlight_file(.42)
def test_invalid_filename():
with pytest.raises(IOError):
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
# Note: with dtype=np.int32 we are performing unsafe casts,
# where X.astype(dtype) overflows. The result is
# then platform dependent and X_dense.astype(dtype) may be
# different from X_sparse.astype(dtype).asarray().
X_input = X.astype(dtype)
dump_svmlight_file(X_input, y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
comment = str(comment, "utf-8")
assert "scikit-learn %s" % sklearn.__version__ in comment
comment = f.readline()
comment = str(comment, "utf-8")
assert ["one", "zero"][zero_based] + "-based" in comment
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert X2.dtype == dtype
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if sp.issparse(X_input):
X_input_dense = X_input.toarray()
else:
X_input_dense = X_input
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_input_dense, X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype, copy=False), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_input_dense, X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype, copy=False), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert f.readline() == b"1 0:1 2:3 4:5\n"
assert f.readline() == b"0,2 \n"
assert f.readline() == b"0,1 1:5 3:1\n"
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert (f.readline() ==
b"1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n")
assert f.readline() == b"2.1 0:1000000000 1:2e+18 2:3e+27\n"
assert f.readline() == b"3.01 \n"
assert f.readline() == b"1.000000000000001 \n"
assert f.readline() == b"1 \n"
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b"It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc"
f = BytesIO()
with pytest.raises(UnicodeDecodeError):
dump_svmlight_file(X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_almost_equal(y, y2)
f = BytesIO()
with pytest.raises(ValueError):
dump_svmlight_file(X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
with pytest.raises(ValueError):
dump_svmlight_file(X, y2d, f)
f = BytesIO()
with pytest.raises(ValueError):
dump_svmlight_file(X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b"""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985"""
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
def test_load_zeros():
f = BytesIO()
true_X = sp.csr_matrix(np.zeros(shape=(3, 4)))
true_y = np.array([0, 1, 0])
dump_svmlight_file(true_X, true_y, f)
for zero_based in ['auto', True, False]:
f.seek(0)
X, y = load_svmlight_file(f, n_features=4, zero_based=zero_based)
assert_array_almost_equal(y, true_y)
assert_array_almost_equal(X.toarray(), true_X.toarray())
@pytest.mark.parametrize('sparsity', [0, 0.1, .5, 0.99, 1])
@pytest.mark.parametrize('n_samples', [13, 101])
@pytest.mark.parametrize('n_features', [2, 7, 41])
def test_load_with_offsets(sparsity, n_samples, n_features):
rng = np.random.RandomState(0)
X = rng.uniform(low=0.0, high=1.0, size=(n_samples, n_features))
if sparsity:
X[X < sparsity] = 0.0
X = sp.csr_matrix(X)
y = rng.randint(low=0, high=2, size=n_samples)
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
size = len(f.getvalue())
# put some marks that are likely to happen anywhere in a row
mark_0 = 0
mark_1 = size // 3
length_0 = mark_1 - mark_0
mark_2 = 4 * size // 5
length_1 = mark_2 - mark_1
# load the original sparse matrix into 3 independent CSR matrices
X_0, y_0 = load_svmlight_file(f, n_features=n_features,
offset=mark_0, length=length_0)
X_1, y_1 = load_svmlight_file(f, n_features=n_features,
offset=mark_1, length=length_1)
X_2, y_2 = load_svmlight_file(f, n_features=n_features,
offset=mark_2)
y_concat = np.concatenate([y_0, y_1, y_2])
X_concat = sp.vstack([X_0, X_1, X_2])
assert_array_almost_equal(y, y_concat)
assert_array_almost_equal(X.toarray(), X_concat.toarray())
def test_load_offset_exhaustive_splits():
rng = np.random.RandomState(0)
X = np.array([
[0, 0, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 6],
[1, 2, 3, 4, 0, 6],
[0, 0, 0, 0, 0, 0],
[1, 0, 3, 0, 0, 0],
[0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0],
])
X = sp.csr_matrix(X)
n_samples, n_features = X.shape
y = rng.randint(low=0, high=2, size=n_samples)
query_id = np.arange(n_samples) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id)
f.seek(0)
size = len(f.getvalue())
# load the same data in 2 parts with all the possible byte offsets to
# locate the split so has to test for particular boundary cases
for mark in range(size):
f.seek(0)
X_0, y_0, q_0 = load_svmlight_file(f, n_features=n_features,
query_id=True, offset=0,
length=mark)
X_1, y_1, q_1 = load_svmlight_file(f, n_features=n_features,
query_id=True, offset=mark,
length=-1)
q_concat = np.concatenate([q_0, q_1])
y_concat = np.concatenate([y_0, y_1])
X_concat = sp.vstack([X_0, X_1])
assert_array_almost_equal(y, y_concat)
assert_array_equal(query_id, q_concat)
assert_array_almost_equal(X.toarray(), X_concat.toarray())
def test_load_with_offsets_error():
with pytest.raises(ValueError, match="n_features is required"):
load_svmlight_file(datafile, offset=3, length=3)
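# --- Illustrative sketch (added; not part of the original test module) ---
# The offset/length arguments exercised above let independent workers parse
# disjoint byte ranges of one svmlight file; a two-way split might look like
# (variable names are illustrative):
#
#   size = len(f.getvalue())
#   X_a, y_a = load_svmlight_file(f, n_features=n_features,
#                                 offset=0, length=size // 2)
#   X_b, y_b = load_svmlight_file(f, n_features=n_features,
#                                 offset=size // 2)
#   # sp.vstack([X_a, X_b]) reproduces the full matrix, as the tests verify.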
| bsd-3-clause |
PeterRochford/SkillMetrics | Examples/taylor3.py | 1 | 4261 | '''
How to create a Taylor diagram with labeled data points and modified axes
A third example of how to create a Taylor diagram given one set of
reference observations and multiple model predictions for the quantity.
This example is a variation on the first example (taylor1) where now the
data points are labeled and axes properties are specified. The number format
is also specified for the RMS contour labels.
All functions in the Skill Metrics library are designed to only work with
one-dimensional arrays, e.g. time series of observations at a selected
location. The one-dimensional data are read in as dictionaries via a
pickle file: ref['data'], pred1['data'], pred2['data'],
and pred3['data']. The plot is written to a file in Portable Network
Graphics (PNG) format.
The reference data used in this example are cell concentrations of a
phytoplankton collected from cruise surveys at selected locations and
time. The model predictions are from three different simulations that
have been space-time interpolated to the location and time of the sample
collection. Details on the contents of the dictionary (once loaded) can
be obtained by simply executing the following two statements
>> key_to_value_lengths = {k:len(v) for k, v in ref.items()}
>> print key_to_value_lengths
{'units': 6, 'longitude': 57, 'jday': 57, 'date': 57, 'depth': 57,
'station': 57, 'time': 57, 'latitude': 57, 'data': 57}
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
Created on Dec 6, 2016
@author: [email protected]
'''
import matplotlib.pyplot as plt
import numpy as np
import pickle
import skill_metrics as sm
from sys import version_info
def load_obj(name):
# Load object from file in pickle format
if version_info[0] == 2:
suffix = 'pkl'
else:
suffix = 'pkl3'
with open(name + '.' + suffix, 'rb') as f:
return pickle.load(f) # Python2 succeeds
class Container(object):
def __init__(self, pred1, pred2, pred3, ref):
self.pred1 = pred1
self.pred2 = pred2
self.pred3 = pred3
self.ref = ref
if __name__ == '__main__':
# Close any previously open graphics windows
# ToDo: fails to work within Eclipse
plt.close('all')
# Read data from pickle file
data = load_obj('taylor_data')
# Calculate statistics for Taylor diagram
# The first array element (e.g. taylor_stats1[0]) corresponds to the
# reference series while the second and subsequent elements
# (e.g. taylor_stats1[1:]) are those for the predicted series.
taylor_stats1 = sm.taylor_statistics(data.pred1,data.ref,'data')
taylor_stats2 = sm.taylor_statistics(data.pred2,data.ref,'data')
taylor_stats3 = sm.taylor_statistics(data.pred3,data.ref,'data')
# Store statistics in arrays
sdev = np.array([taylor_stats1['sdev'][0], taylor_stats1['sdev'][1],
taylor_stats2['sdev'][1], taylor_stats3['sdev'][1]])
crmsd = np.array([taylor_stats1['crmsd'][0], taylor_stats1['crmsd'][1],
taylor_stats2['crmsd'][1], taylor_stats3['crmsd'][1]])
ccoef = np.array([taylor_stats1['ccoef'][0], taylor_stats1['ccoef'][1],
taylor_stats2['ccoef'][1], taylor_stats3['ccoef'][1]])
# Specify labels for points in a cell array (M1 for model prediction 1,
# etc.). Note that a label needs to be specified for the reference even
# though it is not used.
label = ['Non-Dimensional Observation', 'M1', 'M2', 'M3']
'''
Produce the Taylor diagram
Label the points and change the axis options for SDEV, CRMSD, and CCOEF.
For an exhaustive list of options to customize your diagram,
please call the function at a Python command line:
>> taylor_diagram
'''
intervalsCOR = np.concatenate((np.arange(0,1.0,0.2),
[0.9, 0.95, 0.99, 1]))
sm.taylor_diagram(sdev,crmsd,ccoef, markerLabel = label,
tickRMS = np.arange(0,60,20),
tickSTD = np.arange(0,55,5), tickCOR = intervalsCOR,
rmslabelformat = ':.1f')
# Write plot to file
plt.savefig('taylor3.png')
# Show plot
plt.show()
| gpl-3.0 |
ssaeger/scikit-learn | sklearn/metrics/ranking.py | 4 | 27716 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
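# Worked example (added for clarity; not part of the original source), using
# the same toy data as the public doctests below:
#
#   y_true = np.array([0, 0, 1, 1])
#   y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
#   # fps == [0, 1, 1, 2], tps == [1, 1, 2, 2]
#   # thresholds == [0.8, 0.4, 0.35, 0.1]
#
# precision_recall_curve and roc_curve below are thin wrappers around these
# cumulative counts.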
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving the maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
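# Worked example (added for clarity; not part of the original source):
#
#   y_true = np.array([[1, 0, 0], [0, 0, 1]])
#   y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
#   coverage_error(y_true, y_score)  # -> 2.5
#
# Sample 1 needs the top 2 scores to cover its true label (0.75 is outranked
# only by 1.0) and sample 2 needs all 3, hence (2 + 3) / 2 = 2.5.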
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered as correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
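# Worked example (added for clarity; not part of the original source):
#
#   y_true = np.array([[1, 0, 0], [0, 0, 1]])
#   y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
#   label_ranking_loss(y_true, y_score)  # -> 0.75
#
# Sample 1 mis-orders 1 of its 2 (relevant, irrelevant) pairs (irrelevant
# score 1.0 beats relevant 0.75) and sample 2 mis-orders 2 of 2, giving
# (0.5 + 1.0) / 2 = 0.75.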
| bsd-3-clause |
matthiasdiener/spack | var/spack/repos/builtin/packages/cosmomc/package.py | 5 | 7786 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import fnmatch
import os
class Cosmomc(Package):
"""CosmoMC is a Fortran 2008 Markov-Chain Monte-Carlo (MCMC) engine
for exploring cosmological parameter space, together with
Fortran and python code for analysing Monte-Carlo samples and
importance sampling (plus a suite of scripts for building grids
of runs, plotting and presenting results)."""
homepage = "http://cosmologist.info/cosmomc/"
url = "https://github.com/cmbant/CosmoMC/archive/Nov2016.tar.gz"
version('2016.11', '98620cb746352f68fb0c1196e9a070ac')
version('2016.06', '92dc651d1407cca6ea9228992165f5cb')
def url_for_version(self, version):
names = {'2016.11': "Nov2016",
'2016.06': "June2016"}
return ("https://github.com/cmbant/CosmoMC/archive/%s.tar.gz" %
names[str(version)])
variant('mpi', default=True, description='Enable MPI support')
variant('planck', default=False,
description='Enable Planck Likelihood code and baseline data')
variant('python', default=True, description='Enable Python bindings')
extends('python', when='+python')
depends_on('mpi', when='+mpi')
depends_on('planck-likelihood', when='+planck')
depends_on('py-matplotlib', type=('build', 'run'), when='+python')
depends_on('py-numpy', type=('build', 'run'), when='+python')
depends_on('py-pandas', type=('build', 'run'), when='+python')
depends_on('py-scipy', type=('build', 'run'), when='+python')
depends_on('py-six', type=('build', 'run'), when='+python')
depends_on('python @2.7:2.999,3.4:', type=('build', 'run'), when='+python')
patch('Makefile.patch')
patch('errorstop.patch')
parallel = False
def install(self, spec, prefix):
# Clean up environment to avoid configure problems
os.environ.pop('LINKMPI', '')
os.environ.pop('NERSC_HOST', '')
os.environ.pop('NONCLIKLIKE', '')
os.environ.pop('PICO', '')
os.environ.pop('PRECISION', '')
os.environ.pop('RECOMBINATION', '')
os.environ.pop('WMAP', '')
# Set up Planck data if requested
clikdir = join_path('data', 'clik')
try:
os.remove(clikdir)
except OSError:
pass
if '+planck' in spec:
os.symlink(join_path(os.environ['CLIK_DATA'], 'plc_2.0'), clikdir)
else:
os.environ.pop('CLIK_DATA', '')
os.environ.pop('CLIK_PATH', '')
os.environ.pop('CLIK_PLUGIN', '')
# Choose compiler
# Note: Instead of checking the compiler vendor, we should
# rewrite the Makefile to use Spack's options all the time
if spec.satisfies('%gcc'):
if not spec.satisfies('%gcc@6:'):
raise InstallError(
"When using GCC, "
"CosmoMC requires version gcc@6: for building")
choosecomp = 'ifortErr=1' # choose gfortran
elif spec.satisfies('%intel'):
if not spec.satisfies('%intel@14:'):
raise InstallError(
"When using the Intel compiler, "
"CosmoMC requires version intel@14: for building")
choosecomp = 'ifortErr=0' # choose ifort
else:
raise InstallError("Only GCC and Intel compilers are supported")
# Configure MPI
if '+mpi' in spec:
wantmpi = 'BUILD=MPI'
mpif90 = 'MPIF90C=%s' % spec['mpi'].mpifc
else:
wantmpi = 'BUILD=NOMPI'
mpif90 = 'MPIF90C='
# Choose BLAS and LAPACK
lapack = ("LAPACKL=%s" %
(spec['lapack'].libs + spec['blas'].libs).ld_flags)
# Build
make(choosecomp, wantmpi, mpif90, lapack)
# Install
mkdirp(prefix.bin)
install('cosmomc', prefix.bin)
root = join_path(prefix.share, 'cosmomc')
mkdirp(root)
entries = [
'batch1',
'batch2',
'batch3',
'camb',
'chains',
'clik_latex.paramnames',
'clik_units.paramnames',
'cosmomc.cbp',
'data',
'distgeneric.ini',
'distparams.ini',
'disttest.ini',
'docs',
'job_script',
'job_script_MOAB',
'job_script_SLURM',
'paramnames',
'params_generic.ini',
'planck_covmats',
'scripts',
# don't copy 'source'
'test.ini',
'test_pico.ini',
'test_planck.ini',
'tests',
]
if '+python' in spec:
entries += ['python']
for entry in entries:
if os.path.isfile(entry):
install(entry, root)
else:
install_tree(entry, join_path(root, entry))
for dirpath, dirnames, filenames in os.walk(prefix):
for filename in fnmatch.filter(filenames, '*~'):
os.remove(os.path.join(dirpath, filename))
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
prefix = self.prefix
spec = self.spec
os.environ.pop('LINKMPI', '')
os.environ.pop('NERSC_HOST', '')
os.environ.pop('NONCLIKLIKE', '')
os.environ.pop('PICO', '')
os.environ.pop('PRECISION', '')
os.environ.pop('RECOMBINATION', '')
os.environ.pop('WMAP', '')
os.environ.pop('COSMOMC_LOCATION', '')
os.environ.pop('PLC_LOCATION', '')
os.environ.pop('CLIKPATH', '')
os.environ.pop('PLANCKLIKE', '')
exe = spec['cosmomc'].command.path
args = []
if '+mpi' in spec:
# Add mpirun prefix
args = ['-np', '1', exe]
exe = join_path(spec['mpi'].prefix.bin, 'mpiexec')
cosmomc = Executable(exe)
with working_dir('spack-check', create=True):
for entry in [
'camb',
'chains',
'data',
'paramnames',
'planck_covmats',
]:
os.symlink(join_path(prefix.share, 'cosmomc', entry), entry)
inifile = join_path(prefix.share, 'cosmomc', 'test.ini')
cosmomc(*(args + [inifile]))
if '+planck' in spec:
inifile = join_path(prefix.share, 'cosmomc', 'test_planck.ini')
cosmomc(*(args + [inifile]))
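# Usage note (added; not part of the original recipe): with this file on a
# Spack repo path, a build enabling every variant would be requested roughly as
#
#   spack install cosmomc +mpi +planck +python %gcc@6:
#
# (spec syntax is illustrative; adjust the compiler constraint to your site).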
| lgpl-2.1 |
vllab/TSMC_DL | Mixture_Model/mixture.py | 1 | 4727 | from datetime import datetime
import sys
import numpy as np
from sklearn.cluster import KMeans
EPS = np.finfo(float).eps
class mixture(object):
def __init__(self, n_components, init_params='wm', n_iter=100, tol=1e-3,
covariance_type='diag', min_covar=1e-4, verbose=False):
#: number of components in the mixture
self.n_components = n_components
#: params to init
self.init_params = init_params
#: max number of iterations
self.n_iter = n_iter
#: convergence threshold
self.tol = tol
self.covariance_type = covariance_type
self.min_covar = min_covar
self.verbose = verbose
k = self.n_components
self.weights = np.array([1.0 / k for _ in range(k)])
self.means = None
self.covars = None
self.converged_ = False
def fit(self, x, means_init_heuristic='random', means=None, labels=None):
k = self.n_components
n = x.shape[0]
d = x.shape[1]
self.means = np.ndarray(shape=(k, d))
# initialization of the means
if 'm' in self.init_params:
if self.verbose:
print('using {} heuristic to initialize the means'
.format(means_init_heuristic))
if means_init_heuristic == 'random':
self.means = np.random.rand(k, d) * 0.5 + 0.25
elif means_init_heuristic == 'data_classes_mean':
if labels is None:
raise ValueError(
'labels required for data_classes_mean init')
self.means = _data_classes_mean_init(x, labels)
elif means_init_heuristic == 'kmeans':
self.means = _kmeans_init(x, k, means=means,
verbose=self.verbose)
# initialization of the covars
if 'c' in self.init_params:
if self.verbose:
print('initializing covars')
cv = np.cov(x.T) + self.min_covar * np.eye(x.shape[1])
if self.covariance_type == 'diag':
self.covars = np.tile(np.diag(cv), (k, 1))
elif self.covariance_type == 'full':
self.covars = np.tile(cv, (k, 1, 1))
start = datetime.now()
iterations = 0
prev_log_likelihood = None
current_log_likelihood = -np.inf
while iterations <= self.n_iter:
elapsed = datetime.now() - start
prev_log_likelihood = current_log_likelihood
# expectation step
log_likelihoods, responsibilities = self.score_samples(x)
current_log_likelihood = log_likelihoods.mean()
if self.verbose:
print('[{:02d}] likelihood = {} (elapsed {})'
.format(iterations, current_log_likelihood, elapsed))
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
self._do_mstep(x, responsibilities)
iterations += 1
end = datetime.now()
elapsed = end - start
print('converged in {} iterations in {}'
.format(iterations, elapsed))
def _do_mstep(self, x, z):
weights = z.sum(axis=0)
weighted_x_sum = np.dot(z.T, x)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
self.weights = (weights / (weights.sum() + 10 * EPS) + EPS)
self.means = weighted_x_sum * inverse_weights
def score_samples(self, x):
log_support = self._log_support(x)
lpr = log_support + np.log(self.weights)
logprob = np.logaddexp.reduce(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def predict(self, x):
return np.sum(np.exp(self._log_support(x)), 1)
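# Illustrative sketch (added; not part of the original module): `mixture` is
# an abstract base, so a concrete subclass must supply `_log_support(x)`
# returning an (n_samples, n_components) array of per-component log densities.
# A minimal, hypothetical isotropic-Gaussian subclass could look like:
#
#   class IsotropicGaussianMixture(mixture):
#       def _log_support(self, x):
#           # squared distance to each component mean, unit variance assumed
#           diff = x[:, np.newaxis, :] - self.means[np.newaxis, :, :]
#           return -0.5 * (diff ** 2).sum(axis=2)
#
#   gmm = IsotropicGaussianMixture(n_components=3, verbose=True)
#   gmm.fit(np.random.rand(500, 2), means_init_heuristic='kmeans')
#   densities = gmm.predict(np.random.rand(10, 2))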
def _kmeans_init(x, k, means=None, verbose=False):
if means is None:
kmeans = KMeans(n_clusters=k,
verbose=int(verbose)).fit(x).cluster_centers_
else:
assert means.shape[0] >= k, 'not enough means provided for kmeans init'
# keeping the first self.k means
kmeans = means[:k, :]
return kmeans
def _data_classes_mean_init(x, labels):
n, d = x.shape
assert labels.shape[0] == n, 'labels and data shapes must match'
label_set = set(labels)
n_labels = len(label_set)
means = np.ndarray(shape=(n_labels, d))
for l in label_set:
matches = np.in1d(labels, l)
means[l] = x[matches].mean(0)
return means
| gpl-3.0 |
dashaub/espandas | tests/test_espandas.py | 1 | 2859 | """Unit tests for the Espandas class"""
import pytest
import pandas as pd
import numpy as np
from espandas import Espandas
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import RequestError, ConnectionError
# ES variables
INDEX = 'unit_tests_index'
TYPE = 'foo_bar'
# Example data frame
df = (100 * pd.DataFrame(np.round(np.random.rand(100, 5), 2))).astype(int)
df.columns = ['A', 'B', 'C', 'D', 'E']
df['indexId'] = df.index + 100
df = df.astype('str')
def test_es():
"""
Before running other tests, ensure connection to ES is established
"""
es = Elasticsearch()
try:
es.indices.create(INDEX)
es.indices.delete(INDEX)
return True
except RequestError:
print('Index already exists: skipping tests.')
return False
except ConnectionError:
print('The ElasticSearch backend is not running: skipping tests.')
return False
except Exception as e:
print('An unknown error occurred connecting to ElasticSearch: %s' % e)
return False
def test_es_client():
"""
Insert a DataFrame and test that it is correctly extracted
"""
# Only run this test if the index does not already exist
# and can be created and deleted
if test_es():
try:
print('Connection to ElasticSearch established: testing write and read.')
es = Elasticsearch()
es.indices.create(INDEX)
esp = Espandas()
esp.es_write(df, INDEX, TYPE)
k = list(df['indexId'].astype('str'))
res = esp.es_read(k, INDEX, TYPE)
# The returned DataFrame should match the original
assert res.shape == df.shape
assert np.all(res.index == df.index)
assert np.all(res.columns == df.columns)
assert np.all(res == df)
# Bogus keys should not match anything
res = esp.es_read(['bar'], INDEX, TYPE)
assert res is None
num_sample = 3
present = list(df.sample(num_sample)['indexId'].astype('str'))
present.append('bar')
res = esp.es_read(present, INDEX, TYPE)
assert res.shape[0] == num_sample
# Test for invalid inputs
# Input must be a DataFrame
with pytest.raises(ValueError):
esp.es_write('foobar', INDEX, TYPE)
# uid_name must exist in the DataFrame
with pytest.raises(ValueError):
esp.es_write(df, INDEX, TYPE, uid_name='foo_index')
# Values in uid_name must be unique
df2 = df.copy()
df2.ix[0, 'indexId'] = df.ix[1, 'indexId']
with pytest.raises(ValueError):
esp.es_write(df2, INDEX, TYPE)
finally:
# Cleanup
es.indices.delete(INDEX)
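# Usage sketch (added; not part of the original test module) -- the round trip
# exercised above boils down to:
#
#   esp = Espandas()
#   esp.es_write(df, INDEX, TYPE)                # index the DataFrame
#   keys = list(df['indexId'].astype('str'))
#   restored = esp.es_read(keys, INDEX, TYPE)    # fetch it back by key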
| gpl-3.0 |
pprett/scikit-learn | sklearn/datasets/lfw.py | 18 | 18714 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Johns and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home
from ..utils import Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
archive_path_temp = archive_path + ".tmp"
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path_temp)
rename(archive_path_temp, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit-learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize the each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
| bsd-3-clause |
rmit-ir/LTR_Cascade | python/Cascade.py | 1 | 23516 | from __future__ import print_function
from __future__ import division
import ast
import baker
import logging
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import MaxAbsScaler
import core
from core.cascade import load_data_file, load_data, load_costs_data, load_model, save_model
from core.cascade import Prune, TreeModel, SVMModel, SGDClassifierModel, group_offsets
from core.metrics import test_all
# TODO: batch per query and not per example (group by query-id using utils.py and sample per group)
def batch_generator(X, y, batch_size, samples_per_epoch):
"""Generate mini-batches."""
number_of_batches = int(samples_per_epoch / batch_size)
shuffle_index = np.arange(np.shape(y)[0])
np.random.shuffle(shuffle_index)
X = X[shuffle_index, :]
y = y[shuffle_index]
for i in range(number_of_batches):
index_batch = shuffle_index[batch_size * i:batch_size * (i + 1)]
X_batch = X[index_batch, :]
if isinstance(X_batch, csr_matrix):
X_batch = X_batch.todense()
y_batch = y[index_batch]
yield np.array(X_batch), y_batch
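# Usage sketch (added for illustration, not called by the cascade code): pull
# mini-batches from a small dense matrix; the sizes below are arbitrary.
def _batch_generator_demo():
    X_demo = np.random.rand(100, 5)
    y_demo = np.random.randint(0, 2, size=100)
    # five batches, each X_batch of shape (20, 5) and y_batch of shape (20,)
    return [x_batch.shape for x_batch, _ in batch_generator(X_demo, y_demo, 20, 100)]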
def predict(cascade, test_data, costs, output_trec_run=None, output_eval=None):
"""Run prediction using the cascade."""
x, y, qid, docno = test_data
# if 'scaler' in cascade:
# cascade['scaler'].transform(x)
states = core.cascade.predict(cascade['stages'], x, qid, cascade['score_update'])
eval_results = {}
for i, state in enumerate(states, 1):
test_metrics = test_all(state['preds'], y, qid, 1) # NOTE: rel was set to 1
print('stage %i: '
'test ERR@5/10/20 %0.4f/%0.4f/%0.4f, '
'test NDCG@5/10/20 %0.4f/%0.4f/%0.4f, '
'test P@5/10/20 %0.4f/%0.4f/%0.4f' %
(i,
test_metrics['err@5'], test_metrics['err@10'], test_metrics['err@20'],
test_metrics['ndcg@5'], test_metrics['ndcg@10'], test_metrics['ndcg@20'],
test_metrics['p@5'], test_metrics['p@10'], test_metrics['p@20']))
n_used_features = len(np.flatnonzero(state['extract_counts']))
n_active_docs = len(state['indexes'])
cost_spent_weighted = np.sum(costs * state['extract_counts'])
print(' weighted L1 %f, cascade features %i, num docs %i, cascade cost %0.2f' %
(np.nan,
n_used_features,
n_active_docs,
cost_spent_weighted / float(x.shape[0])))
name = 'stage%i' % i if i < len(states) else 'cascade'
for m in ['err', 'ndcg', 'map', 'ndcg@5', 'ndcg@10', 'ndcg@20',
'p@5', 'p@10', 'p@20', 'err@5', 'err@10', 'err@20']:
eval_results['%s_%s' % (name, m)] = test_metrics[m]
eval_results['%s_n_features' % name] = n_used_features
eval_results['%s_n_docs' % name] = n_active_docs
eval_results['%s_cost' % name] = cost_spent_weighted
eval_results['%s_cost_per_doc' % name] = cost_spent_weighted / float(x.shape[0])
if output_eval:
with open(output_eval, 'w') as output:
for i, _ in enumerate(states, 1):
name = 'stage%i' % i if i < len(states) else 'cascade'
for m in ['n_features', 'n_docs']:
measure = '%s_%s' % (name, m)
output.write('%-24s%i\n' % (measure, eval_results[measure]))
for m in ['err', 'ndcg', 'map', 'err@5', 'err@10', 'err@20',
'ndcg@5', 'ndcg@10', 'ndcg@20', 'p@5', 'p@10', 'p@20',
'cost', 'cost_per_doc']:
measure = '%s_%s' % (name, m)
output.write('%-24s%0.4f\n' % (measure, eval_results[measure]))
logging.info('Eval result saved to %s' % output_eval)
if output_trec_run:
with open(output_trec_run, 'w') as output:
core.cascade.print_trec_run(output, states[-1]['preds'], y, qid, docno)
logging.info('TREC run saved to %s' % output_trec_run)
def train(train_data, valid_data, costs, importance, n_stages, cutoffs, feature_partitions, alphas, **params):
"""Learn one ranker with SGD and L1 regularization.
Args:
n_stages: number of rankers in the cascade
strategies: a dict of callback functions
"""
x_train, y_train, qid_train, _ = train_data
x_valid, y_valid, qid_valid, _ = valid_data
running_costs = costs.copy()
opened_features = np.array([], dtype=int)
stages = []
for i, features_to_open in enumerate(feature_partitions, 1):
# retrieve the set of features open in this stage
opened_features = np.union1d(opened_features, features_to_open)
non_open_features = np.setdiff1d(np.arange(costs.shape[0]), opened_features)
# hide non-open features in both training/validation sets
x_train_prime = x_train.copy()
x_train_prime[:, non_open_features] = 0
x_valid_prime = None
if x_valid is not None:
x_valid_prime = x_valid.copy()
x_valid_prime[:, non_open_features] = 0
alpha = alphas.pop(0)
print('stage %i: train (with alpha %f)' % (i, alpha))
fit = _train((x_train_prime, y_train, qid_train),
(x_valid_prime, y_valid, qid_valid),
costs=running_costs, importance=importance,
alpha=alpha, **params)
model = SGDClassifierModel(fit)
cutoff = cutoffs.pop(0)
prune = Prune(rank=cutoff) if cutoff else None
stages.append((prune, model))
# amend the cost (features used by the model are now free)
used_features = np.flatnonzero(model.get_feature_mask())
running_costs[used_features] = 0
return stages
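# Illustration (added, not used by the cascade): how the cumulative feature
# opening in train() behaves -- each stage sees the union of every partition
# opened so far. The 5-feature partition below is made up.
def _feature_opening_demo():
    opened = np.array([], dtype=int)
    views = []
    for part in [[0, 1], [2], [3, 4]]:
        opened = np.union1d(opened, part)
        views.append(opened.tolist())
    return views  # [[0, 1], [0, 1, 2], [0, 1, 2, 3, 4]]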
def _train(train_data, valid_data, costs, importance,
max_iter=10, alpha=0.1, minibatch=1000, epochs=10,
l1_ratio=1.0, penalty='none', eta0=0.01):
"""Train one cost-aware linear model using SGD.
Args:
max_iter: number of passes over the mini-batch (mini-epoch?)
alpha: regularizer weight
minibatch: size of a mini-batch
epochs: number of passes over the training data
"""
x_train, y_train, qid_train = train_data
x_valid, y_valid, qid_valid = valid_data
model = SGDClassifier(alpha=alpha,
verbose=False,
shuffle=False,
n_iter=max_iter,
learning_rate='constant',
penalty=penalty,
l1_ratio=l1_ratio,
eta0=eta0)
model.classes_ = np.array([-1, 1])
# fit SGD over the full data to initialize the model weights
model.fit(x_train, y_train)
valid_scores = (np.nan, np.nan, np.nan)
if x_valid is not None:
m = test_all(model.decision_function(x_valid), y_valid, qid_valid, 1)
valid_scores = (m['ndcg@10'], m['p@10'], m['err@10'])
print('[%3i]: weighted L1 %8.2f, cost %8d, features %4d, valid ndcg@10/p@10/err@10 %0.4f/%0.4f/%0.4f' %
(0,
np.sum(np.abs(model.coef_[0] * costs)),
np.sum(costs[np.nonzero(model.coef_[0])]),
np.count_nonzero(model.coef_[0]),
valid_scores[0], valid_scores[1], valid_scores[2]))
# SGD algorithm (Tsuruoka et al., 2009)
u = np.zeros(x_train.shape[1])
q = np.zeros(x_train.shape[1])
for epoch in range(1, epochs + 1):
for iterno, batch in enumerate(batch_generator(x_train, y_train, minibatch, x_train.shape[0]), 1):
x, y = batch
# call the internal method to specify custom classes, coef_init, and intercept_init
model._partial_fit(x, y,
alpha=model.alpha,
C=1.0,
loss=model.loss,
learning_rate=model.learning_rate,
n_iter=1,
classes=model.classes_,
sample_weight=None,
coef_init=model.coef_,
intercept_init=model.intercept_)
new_w = np.zeros(model.coef_.shape[1])
u += model.eta0 * model.alpha * costs / float(x_train.shape[0]) # note the costs
for i in range(len(model.coef_[0])):
if model.coef_[0][i] > 0:
new_w[i] = max(0, model.coef_[0][i] - (u[i] + q[i]))
elif model.coef_[0][i] < 0:
new_w[i] = min(0, model.coef_[0][i] + (u[i] - q[i]))
q += new_w - model.coef_[0]
model.coef_[0] = new_w
valid_scores = (np.nan, np.nan, np.nan)
if x_valid is not None:
m = test_all(model.decision_function(x_valid), y_valid, qid_valid, 1)
valid_scores = (m['ndcg@10'], m['p@10'], m['err@10'])
print('[%3i]: weighted L1 %8.2f, cost %8d, features %4d, valid ndcg@10/p@10/err@10 %0.4f/%0.4f/%0.4f' %
(epoch,
np.sum(np.abs(model.coef_[0] * costs)),
np.sum(costs[np.nonzero(model.coef_[0])]),
np.count_nonzero(model.coef_[0]),
valid_scores[0], valid_scores[1], valid_scores[2]))
return model
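# Illustration (added, not used by the cascade): the cumulative L1 penalty
# clipping step inside _train above (Tsuruoka et al., 2009), shown in
# isolation on a toy weight vector; u is the cumulative penalty available so
# far, q the signed penalty already applied, and all values are made up.
def _cumulative_l1_clip_demo():
    w = np.array([0.8, -0.3, 0.05])
    u = np.array([0.2, 0.2, 0.2])
    q = np.array([-0.1, 0.05, 0.0])
    new_w = np.zeros_like(w)
    for i in range(len(w)):
        if w[i] > 0:
            new_w[i] = max(0, w[i] - (u[i] + q[i]))
        elif w[i] < 0:
            new_w[i] = min(0, w[i] + (u[i] - q[i]))
    return new_w  # -> array([ 0.7 , -0.15,  0.  ])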
def retrain(model_type, stages, train_data, valid_data,
learning_rate, subsample, trees, nodes, up_to=0):
params = {'max_depth': 7,
'eta': learning_rate,
'silent': True,
'subsample': subsample}
if model_type in ['GBDT']:
params['objective'] = 'multi:softprob'
params['eval_metric'] = 'mlogloss'
score_function, set_classes = core.get_score_multiclass, True
elif model_type in ['GBRT']:
params['objective'] = 'reg:linear'
params['eval_metric'] = 'rmse'
score_function, set_classes = core.get_score, False
elif model_type in ['LambdaMART']:
params['objective'] = 'rank:pairwise'
params['eval_metric'] = 'rmse'
score_function, set_classes = core.get_score, False
else:
raise Exception()
new_stages = []
for i, (prune, model) in enumerate(stages, 1):
used_features = np.flatnonzero(model.get_feature_mask())
print('stage %i: retrain with %i features' % (i, used_features.size))
_, y_train, _, _ = train_data
class_weights = core.get_class_weights(y_train)
import GBDT
new_model = TreeModel(
model=GBDT.train(train_data, valid_data, score_function, class_weights,
params, trees=trees, nodes=nodes,
set_classes=set_classes, features=used_features),
score_function=score_function,
class_weights=class_weights,
n_features=train_data[0].shape[1])
new_stages.append((prune, new_model))
if i == up_to:
break # NOTE: stop as requested
return new_stages
def retrain_with_RankSVM(stages, train_data, valid_data):
new_stages = []
for i, (prune, model) in enumerate(stages, 1):
used_features = np.flatnonzero(model.get_feature_mask())
print('stage %i: retrain with %i features' % (i, used_features.size))
_, y_train, _, _ = train_data
class_weights = core.get_class_weights(y_train)
params = {'tol': float(1e-4),
'fit_intercept': True,
'cache_size': 20000,
'intercept_scaling': 1,
'class_weight': None,
'verbose': True,
'random_state': None,
'max_iter': 1000,
'loss': 'hinge',
'penalty': 'l2'}
score_function = core.get_score
import RankSVM
new_model = SVMModel(
model=RankSVM.train(train_data, valid_data, RankSVM.get_RankSVM, score_function,
class_weights, params, C=[0.1, 0.5, 1, 2], transform=True),
score_function=score_function,
class_weights=class_weights,
n_features=train_data[0].shape[1])
new_stages.append((prune, new_model))
return new_stages
def train_disjoint_cascade(partition_criteria, train_file, validation_file, test_file,
costs_file=None, importance_file=None, model_prefix=None,
n_stages=3, cutoffs=[None, 10, 5], alpha=0.1, epochs=10, pairwise_transform=False,
GBDT_retraining=False):
"""Train a cascade over a partition of disjoint feature sets."""
np.random.seed(0) # freeze the randomness bit
alphas = alpha if isinstance(alpha, list) else [alpha] * n_stages
params = {'epochs': epochs,
'l1_ratio': 1.0,
'penalty': 'none'}
scaler = MaxAbsScaler(copy=False)
train_data, valid_data, test_data = load_data(
train_file, validation_file, test_file, scaler=scaler)
costs, importance = load_costs_data(
costs_file, importance_file, n_features=train_data[0].shape[1])
    # these options don't go well together (or I haven't figured out how to make them work)
assert not (pairwise_transform and GBDT_retraining)
# keep the original as GBDT won't work with polarized labels
original_train_data = train_data
# massage the data a bit ...
x_train, y_train, qid_train, docno_train = train_data
y_train = core.polarize(y_train)
if pairwise_transform:
from utils import per_query_transform_pairwise
x_train, y_train = per_query_transform_pairwise(x_train.toarray(), y_train, qid_train)
train_data = (x_train, y_train, qid_train, docno_train)
is_qf = np.ones_like(costs)
x = x_train.toarray()
for j, _ in enumerate(costs):
for a, b in group_offsets(qid_train):
if (x[a:b, j] != x[a, j]).any():
is_qf[j] = 0
break
# NOTE: costs has to be untainted (make copy before passing it to functions)
partitions = partition_criteria(n_stages, is_qf, costs.copy(), importance)
stages = train(train_data, valid_data, costs.copy(), importance, n_stages,
cutoffs=cutoffs, feature_partitions=partitions, alphas=alphas, **params)
if GBDT_retraining:
stages = retrain('GBDT', stages, original_train_data, valid_data,
trees=[5, 10, 50, 100, 500, 1000], nodes=[32])
cascade = {'stages': stages,
'scaler': scaler,
'score_update': core.cascade.UpshiftUpdate(gap=0.1)}
if model_prefix:
save_model(cascade, model_prefix)
predict(cascade, test_data, costs)
@baker.command(name='train')
def do_train(strategy, train_file, validation_file, test_file,
costs_file=None, importance_file=None, model_prefix=None,
n_stages=3, cutoffs="[None,10,5]", alpha="0.1", epochs=10,
pairwise_transform=False, GBDT_retraining=False, use_query_features=False):
"""Train a disjoint cascade"""
def no_partition(n_stages, is_qf, costs, _):
return [np.arange(costs.shape[0])] * n_stages
def random_partition(n_stages, is_qf, costs, _):
if use_query_features:
features = np.random.permutation(costs.shape[0])
else:
features = np.flatnonzero(1 - is_qf)
np.random.shuffle(features)
return np.array_split(features, n_stages)
def cost_biased_partition(n_stages, is_qf, costs, _):
if use_query_features:
features = np.argsort(costs)
else:
nqf = np.flatnonzero(1 - is_qf)
features = nqf[np.argsort(costs[nqf])]
return np.array_split(features, n_stages)
def importance_biased_partition(n_stages, is_qf, _, importance):
if use_query_features:
features = np.argsort(importance)
else:
nqf = np.flatnonzero(1 - is_qf)
features = nqf[np.argsort(importance[nqf])]
return np.array_split(features[::-1], n_stages)
def efficiency_biased_partition(n_stages, is_qf, costs, importance):
efficiency = importance / costs # or any other return curves would do
if use_query_features:
features = np.argsort(efficiency)
else:
nqf = np.flatnonzero(1 - is_qf)
features = nqf[np.argsort(efficiency[nqf])]
return np.array_split(features[::-1], n_stages) # in descending order
if strategy in ['default', 'all']:
partition = no_partition
elif strategy in ['random']:
partition = random_partition
elif strategy in ['cost_biased', 'cost']:
partition = cost_biased_partition
elif strategy in ['importance_biased', 'importance']:
partition = importance_biased_partition
elif strategy in ['efficiency_biased', 'efficiency']:
partition = efficiency_biased_partition
else:
print("Strategy not available: '%s'" % strategy)
return
train_disjoint_cascade(partition, train_file, validation_file, test_file,
costs_file, importance_file, model_prefix=model_prefix,
n_stages=n_stages, cutoffs=ast.literal_eval(cutoffs),
alpha=ast.literal_eval(alpha), epochs=epochs,
pairwise_transform=pairwise_transform, GBDT_retraining=GBDT_retraining)
@baker.command(name='train_budgeted_GBDT')
def do_train_budgeted_GBDT(train_file, validation_file, test_file, costs_file=None,
importance_file=None, model_prefix=None, budget=None,
trees='[5, 10, 50, 100, 500, 1000]', nodes='[32]'):
"""Train a 1-stage budgeted GBDT cascade"""
train_data, valid_data, test_data = load_data(train_file, validation_file, test_file)
costs, importance = load_costs_data(costs_file, importance_file,
n_features=train_data[0].shape[1])
x_train, _, _ = train_data
x_train = x_train.toarray()
# not all features will be used in a full model
all_fids = [i for i in range(x_train.shape[1]) if any(x_train[:, i])]
budget = float(budget)
if budget:
c = costs[all_fids]
c[c.argsort()] = c[c.argsort()].cumsum()
fids = [fid for fid, b in zip(all_fids, c) if b <= budget]
else:
fids = all_fids
used_features = np.array(fids)
# used_features = np.flatnonzero(model.get_feature_mask())
print('Train a budgeted GBDT with %i features' % used_features.size)
_, y_train, _ = train_data
class_weights = core.get_class_weights(y_train)
params = {'max_depth': 7,
'eta': 0.1,
'silent': True,
'objective': 'multi:softprob',
'eval_metric': 'mlogloss',
'subsample': 0.5}
import GBDT
new_model = TreeModel(
model=GBDT.train(train_data, valid_data, core.get_score_multiclass, class_weights,
params, trees=ast.literal_eval(trees), nodes=ast.literal_eval(nodes),
set_classes=True, features=used_features),
score_function=core.get_score_multiclass,
class_weights=class_weights,
n_features=train_data[0].shape[1])
cascade = {'stages': [(None, new_model)],
'score_update': core.cascade.UpshiftUpdate(gap=0.1)}
if model_prefix:
save_model(cascade, model_prefix)
predict(cascade, test_data, costs)
@baker.command(name='predict')
def do_predict(test_file, costs_file, model_file, output_trec_run=None, output_eval=None,
override_cutoffs=None):
"""Run prediction with a saved cascade"""
test_data = load_data_file(test_file)
costs, _ = load_costs_data(costs_file, None, n_features=test_data[0].shape[1])
cascade = load_model(model_file)
if 'scaler' in cascade:
cascade['scaler'].transform(test_data[0])
if override_cutoffs:
cutoffs = ast.literal_eval(override_cutoffs)
logging.info('Override cutoffs with %s' % cutoffs)
new_stages = []
for i, (prune, model) in enumerate(cascade['stages']):
new_stages.append((Prune(rank=cutoffs[i]), model))
cascade['stages'] = new_stages
predict(cascade, test_data, costs,
output_trec_run=output_trec_run, output_eval=output_eval)
@baker.command(name='retrain')
def do_retrain(model_type, train_file, validation_file, model_file, new_model_file,
test_file=None, costs_file=None, random=0, up_to=0,
learning_rate="0.1", subsample="0.5", trees="[5,10,50,100,500,1000]", nodes="[32]",
output_trec_run=None, output_eval=None):
"""Retrain a tree-based cascade using features learned in the linear models"""
train_data = load_data_file(train_file)
valid_data = (None,) * 4
if validation_file:
valid_data = load_data_file(validation_file)
test_data = (None,) * 4
costs = None
if test_file is not None and costs_file is not None:
test_data = load_data_file(test_file)
costs, _ = load_costs_data(costs_file, None, n_features=test_data[0].shape[1])
cascade = load_model(model_file)
if 'scaler' in cascade:
cascade['scaler'].transform(train_data[0])
if valid_data[0] is not None:
cascade['scaler'].transform(valid_data[0])
if test_data[0] is not None:
cascade['scaler'].transform(test_data[0])
if random > 0:
for _ in range(random):
tree = 1 + np.random.randint(1000)
node = np.random.choice([2, 4, 8, 16, 32, 64])
print('tree %i, node %i' % (tree, node))
new_cascade = cascade.copy()
new_cascade['stages'] = retrain(model_type, cascade['stages'], train_data, valid_data,
learning_rate=ast.literal_eval(learning_rate),
subsample=ast.literal_eval(subsample),
trees=[tree], nodes=[node], up_to=up_to)
if test_data[0] is not None:
predict(new_cascade, test_data, costs,
output_trec_run=output_trec_run, output_eval=output_eval)
return
cascade['stages'] = retrain(model_type, cascade['stages'], train_data, valid_data,
learning_rate=ast.literal_eval(learning_rate),
subsample=ast.literal_eval(subsample),
trees=ast.literal_eval(trees), nodes=ast.literal_eval(nodes), up_to=up_to)
save_model(cascade, new_model_file)
if test_data[0] is not None:
predict(cascade, test_data, costs,
output_trec_run=output_trec_run, output_eval=output_eval)
@baker.command(name='info')
def do_info(model_file):
s = set()
cascade = load_model(model_file)
for i, (_, stage) in enumerate(cascade['stages'], 1):
fids = np.flatnonzero(stage.get_feature_mask()) + 1
print('stage', i)
print('n_features', len(fids))
print('fids', fids)
for i in fids:
s.add(i)
print('total n_features', len(s))
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
baker.run()
| mit |
abimannans/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # n_clusters * n_samples_per_cluster = 30 samples in total
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
    # Test that passing an AgglomerativeClustering instance as n_clusters
    # gives the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that each leaf subcluster has a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/linear_model/plot_sgd_separating_hyperplane.py | 1 | 1221 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.linear_model import SGDClassifier
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
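# Added illustration: report the learned hyperplane w . x + b = 0 alongside
# the plot (clf.coef_ and clf.intercept_ are available once fit has run).
w = clf.coef_[0]
b = clf.intercept_[0]
print("separating hyperplane: %.3f * x1 + %.3f * x2 + %.3f = 0" % (w[0], w[1], b))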
| mit |
alvations/oque | que-en-de.py | 1 | 8337 |
import io, sys
import numpy as np
from scipy.stats import uniform as sp_rand
from itertools import combinations
from sklearn.linear_model import BayesianRidge
from sklearn.grid_search import RandomizedSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from o import cosine_feature, complexity_feature
train, test = 'training', 'test'
def load_quest(direction, dataset, which_data, quest_data_path='quest/',
to_normalize=True):
'''
# USAGE:
baseline_train = load_quest('en-de', 'training', 'baseline17')
baseline_train = load_quest('en-de', 'test', 'baseline17')
meteor_train = load_quest('en-de', 'training', 'meteor')
meteor_test = load_quest('en-de', 'test', 'meteor')
'''
x = np.loadtxt(quest_data_path+direction+'.'+dataset+'.'+which_data)
if to_normalize:
x = x / np.linalg.norm(x)
return x
def load_wmt15_data(direction):
# Load train data
baseline_train = load_quest(direction, train, 'baseline17', to_normalize=False)
meteor_train = load_quest(direction, train, 'meteor', to_normalize=False)
# Load test data
baseline_test = load_quest(direction, test, 'baseline17', to_normalize=False)
meteor_test = load_quest(direction, test, 'meteor', to_normalize=False)
return baseline_train, meteor_train, baseline_test, meteor_test
def load_cosine_features(direction):
cos_train = cosine_feature(direction, train)
#cos_train = complexity_feature(direction, train)
cos_test = cosine_feature(direction, test)
#cos_test = complexity_feature(direction, test)
return cos_train, cos_test
def train_classifier(X_train, y_train, to_tune, classifier):
# Initialize Classifier.
clf = BayesianRidge()
clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
#clf = RandomForestRegressor()
if classifier:
clf = classifier
to_tune = False
if to_tune:
# Grid search: find optimal classifier parameters.
param_grid = {'alpha_1': sp_rand(), 'alpha_2': sp_rand()}
param_grid = {'C': sp_rand(), 'gamma': sp_rand()}
rsearch = RandomizedSearchCV(estimator=clf,
param_distributions=param_grid, n_iter=5000)
rsearch.fit(X_train, y_train)
# Use tuned classifier.
clf = rsearch.best_estimator_
# Trains Classifier
clf.fit(X_train, y_train)
return clf
def brute_force_feature_selection():
x = range(1,18)
for l in range (1,len(x)+1):
for f in list(combinations(range(0,len(x)),l)):
yield f
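def _brute_force_demo():
    # Illustrative only (added): the generator above enumerates every
    # non-empty subset of the 17 baseline features, 2**17 - 1 in total.
    return sum(1 for _ in brute_force_feature_selection())  # 131071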
def evaluate_classifier(clf, X_test, direction, with_cosine,
to_tune, to_output=False, to_hack=False):
answers = list(clf.predict(X_test))
if to_hack:
hacked_answers = []
for i,j in zip(answers, X_test):
if j[9] > 0.7 and j[0] < 12: i = i - 0.2;
if j[0] ==1 or j[1]== 1: i = i - 0.15;
if j[0] > 200: i = i - 0.1;
if i < 0: i = 0.0;
hacked_answers.append(i)
answers = hacked_answers
outfile_name = ''
if to_output: # Outputs to file.
to_tune_str = 'tuned' if to_tune else 'notune'
model_name = 'withcosine' if with_cosine else 'baseline'
outfile_name = ".".join(['oque',model_name,
to_tune_str,direction,'output'])
with io.open(outfile_name, 'w') as fout:
for i in answers:
fout.write(unicode(i)+'\n')
return answers, outfile_name
def brute_force_classification(X_train, y_train, X_test, y_test,
direction, with_cosine,
to_tune, to_output=False, to_hack=False):
#score_fout = io.open('que.'+direction+'.scores', 'w')
for f in brute_force_feature_selection():
_X_train = X_train[:, f]
_X_test = X_test[:, f]
# Train classifier
        clf = train_classifier(_X_train, y_train, to_tune, classifier=None)
answers, outfile_name = evaluate_classifier(clf, _X_test, direction,
with_cosine, to_tune,
to_output=False, to_hack=False)
mse = mean_squared_error(y_test, np.array(answers))
mae = mean_absolute_error(y_test, np.array(answers))
if to_output:
outfile_name = "results/oque.baseline." + direction +'.'+str(mae) + '.'
outfile_name+= "-".join(map(str, f))+'.output'
with io.open(outfile_name, 'w') as fout:
for i in answers:
fout.write(unicode(i)+'\n')
print mae, f
sys.stdout.flush()
def experiments(direction, with_cosine, to_tune, to_output=False, to_hack=False,
to_debug=False, classifier=None):
'''
# USAGE:
direction = 'en-de'
to_tune = False
with_cosine = False
    outfilename, mae, mse = experiments(direction, with_cosine, to_tune)
print outfilename, mae, mse
'''
# Create training and testing array and outputs
X_train, y_train, X_test, y_test = load_wmt15_data(direction)
if with_cosine:
# Create cosine features for training
cos_train, cos_test = load_cosine_features(direction)
X_train = np.concatenate((X_train, cos_train), axis=1)
X_test = np.concatenate((X_test, cos_test), axis=1)
brute_force_classification(X_train, y_train, X_test, y_test, direction,
with_cosine, to_tune, to_output=False,
to_hack=False)
    # Best setup for EN-DE up till now.
    f = (2, 9, 13)
    _X_train = X_train[:, f]
    _X_test = X_test[:, f]
    clf = train_classifier(_X_train, y_train, to_tune, classifier=None)
    answers, outfile_name = evaluate_classifier(clf, _X_test, direction,
                                                with_cosine, to_tune,
                                                to_output=to_output, to_hack=to_hack)
mse = mean_squared_error(y_test, np.array(answers))
mae = mean_absolute_error(y_test, np.array(answers))
if to_debug:
srcfile = io.open('quest/en-de_source.test', 'r')
trgfile = io.open('quest/en-de_target.test', 'r')
cos_train, cos_test = load_cosine_features(direction)
for i,j,k,s,t, c in zip(answers, y_test, X_test,
srcfile, trgfile, cos_test):
if i - j > 0.095 or j -1 > 0.095 or c == 9.99990000e-11:
print i, j, k[0], k[9], k, c
print s, t
return outfile_name, mae, mse
direction = 'en-de'
with_cosine = False
to_tune = False
to_output = False
outfilename, mae, mse = experiments(direction, with_cosine,to_tune, to_output=False, to_debug=False)
print outfilename, mae, mse
# DE-EN
# no-hack at all
# oque.baseline.notune.de-en.output 0.0692666454858 0.011038250617
# no-hack, with cosine
# oque.withcosine.notune.de-en.output 0.0692590476386 0.0110349222335
# Super default + hack
# oque.baseline.notune.de-en.output 0.0685437539196 0.0106677292505
# hacked
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# oque.withcosine.notune.de-en.output 0.0685361560723 0.0106643693054
# EN-DE
# oque.baseline.notune.en-de.output 0.0980804849285 0.0184924281565
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# oque.baseline.notune.en-de.output 0.097544087243 0.0208756823852
# oque.withcosine.notune.en-de.output 0.0975427119756 0.0208755274686
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.1
# oque.withcosine.notune.en-de.output 0.0973017481202 0.0207602928984
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# oque.withcosine.notune.en-de.output 0.0972310140807 0.0207568924808
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# if j[0] > 200: i = i - 0.1
# oque.withcosine.notune.en-de.output 0.0968903228194 0.0206775825255
# if j[9] > 0.7 and j[0] < 12: i = i -0.2
# if j[0] ==1 or j[1]== 1: i = i - 0.15
# if j[0] > 200: i = i - 0.1
# if i < 0: i = 0.0
# oque.withcosine.notune.en-de.output 0.0968359771138 0.0206633629455 | mit |
openp2pdesign/repoSNA | platform_analysis/sna.py | 2 | 4448 | # -*- encoding: utf-8 -*-
#
# Social Network Analysis of Git, Hg, SVN, GitHub, BitBucket repositories
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
import networkx as nx
import datetime
import pandas as pd
def save_graph(graph, filename, self_loops):
"""
    Transform dates on a graph to strings and save it as a GraphML file.
"""
for u, v, key, attr in graph.edges(data=True, keys=True):
if type(attr["start"]) is datetime.datetime:
attr["start"] = attr["start"].strftime('%Y/%m/%d-%H:%M:%S')
attr["endopen"] = str(attr["endopen"])
if self_loops is False:
self_loops_edges = graph.selfloop_edges(keys=True, data=True)
graph.remove_edges_from(self_loops_edges)
else:
pass
nx.write_graphml(graph, filename)
return
def graph_to_pandas_time_series(graph):
"""
Transform a graph into a pandas time series DataFrame.
"""
# Empty DataFrame of actions
time_dataframe = pd.DataFrame(columns=[
'0',
'1',
'node',
'msg',
'type',
'endopen',
'start',
'value'
])
# Iterate over edges to create a DataFrame of actions
for i in graph.edges_iter(data=True):
if "node" in i[2]:
node = i[2]["node"]
else:
node = "None"
if "msg" in i[2]:
msg = i[2]["msg"]
else:
msg = "None"
# Create a new row
new_row = [
i[0],
i[1],
node,
msg,
i[2]["type"],
i[2]["endopen"],
i[2]["start"],
1
]
# Add the new row to the DataFrame of actions
time_dataframe.loc[len(time_dataframe)] = new_row
# Convert column strings to datetimes
time_dataframe['start'] = pd.to_datetime(time_dataframe['start'])
time_dataframe['endopen'] = pd.to_datetime(time_dataframe['endopen'])
return time_dataframe
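def _time_series_demo():
    # Illustrative only (added): build a tiny two-edge interaction graph with
    # the attributes the converter above expects, then turn it into a
    # DataFrame. Assumes the networkx 1.x API already used by this module.
    g = nx.MultiDiGraph()
    g.add_edge('alice', 'bob', type='commit',
               start=datetime.datetime(2016, 1, 1, 10, 0),
               endopen=datetime.datetime(2016, 1, 1, 10, 0))
    g.add_edge('bob', 'alice', type='issue',
               start=datetime.datetime(2016, 1, 2, 12, 30),
               endopen=datetime.datetime(2016, 1, 2, 12, 30))
    return graph_to_pandas_time_series(g)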
def time_analysis(data, focus, interaction, structure):
"""
Analyse a pandas time series DataFrame.
    Returns a DataFrame; if structure == "combined",
    returns a Series with all the interactions merged.
"""
# Define the DataFrame index as time-based
data.index = data['start']
# List of types of interaction
interaction_types = data["type"].value_counts()
# Users stats
# Empty dictionary of DataFrames (one for each user)
users_stats = {}
    # Users may be starting (0) or receiving (1) the interaction
# Create a list of users
if interaction == 0:
users = data["0"].value_counts()
elif interaction == 1:
users = data["1"].value_counts()
else:
users = data["0"].value_counts()
# Add empty DataFrame for each active user
for i in users.index:
users_stats[i] = pd.DataFrame(columns=list(interaction_types.index))
# Fill each Dataframe of active users with zeroes, as the default value
for i in data.iterrows():
users_stats[i[1]["0"]].loc[i[1]["start"]] = [0] * len(list(interaction_types.index))
# Add a 1 to each timed interaction
for i in data.iterrows():
users_stats[i[1]["0"]].ix[i[1]["start"], i[1]["type"]] = 1
# Global stats
data_list = []
index_list = []
for i in users_stats:
data_list.append(users_stats[i])
index_list.append(i)
global_stats = pd.concat(data_list)
# Merge interactions if required by the user
if structure.lower() == "combined":
global_stats = global_stats.sum(axis=1)
for i in users_stats:
users_stats[i] = users_stats[i].sum(axis=1)
# Transform users_stats into a multi index DataFrame
users_stats = pd.concat(data_list, keys=index_list)
# Final output
if focus.lower() == "global":
return global_stats
elif focus.lower() == "user":
return users_stats
else:
return global_stats
def type_stats(data, focus):
"""
Return a DataFrame or a dict of DataFrames for global type stats.
Helper function for speeding up analysis.
"""
if focus.lower() == "global":
return data.sum(axis=0)
elif focus.lower() == "user":
users_stats = {}
for i in data:
            users_stats[i] = data[i].sum(axis=0)
        return users_stats
else:
return data.sum(axis=0)
if __name__ == "__main__":
pass
| lgpl-3.0 |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_tkagg.py | 2 | 27238 | # Todd Miller [email protected]
from __future__ import division
import os, sys, math
import os.path
import Tkinter as Tk, FileDialog
# Paint image to Tk photo blitter extension
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.backends.windowing as windowing
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def round(x):
return int(math.floor(x+0.5))
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
import tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class Show(ShowBase):
def mainloop(self):
Tk.mainloop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
_focus = windowing.FocusManager()
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Tk.Tk()
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class TimerTk(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self.parent = parent
self._timer = None
def _timer_start(self):
self._timer_stop()
self._timer = self.parent.after(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
self.parent.after_cancel(self._timer)
self._timer = None
def _on_timer(self):
TimerBase._on_timer(self)
# Tk after() is only a single shot, so we need to add code here to
# reset the timer if we're not operating in single shot mode.
if not self._single and len(self.callbacks) > 0:
self._timer = self.parent.after(self._interval, self._on_timer)
else:
self._timer = None
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
self._idle_callback = None
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=4)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows)
# Can't get destroy events by binding to _tkcanvas. Therefore, bind
# to the window and filter.
def filter_destroy(evt):
if evt.widget is self._tkcanvas:
self.close_event()
root.bind("<Destroy>", filter_destroy)
self._master = master
self._tkcanvas.focus_set()
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=int(width), height=int(height))
self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
self.resize_event()
self.show()
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
d = self._idle
self._idle = False
def idle_draw(*args):
self.draw()
self._idle = True
if d:
self._idle_callback = self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, guiEvent=event)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = +1
elif num==5: step = -1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val<256:
key = chr(val)
else:
key = None
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerTk(self._tkcanvas, *args, **kwargs)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.window.wm_title("Figure %d" % num)
self.canvas = canvas
self._num = num
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window.minsize(int(w*3/4),int(h*3/4))
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self.window )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2TkAgg( canvas, self.window )
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.show()
def resize(self, width, height=None):
# before 09-12-22, the resize method takes a single *event*
# parameter. On the other hand, the resize method of other
# FigureManager class takes *width* and *height* parameter,
# which is used to change the size of the window. For the
# Figure.set_size_inches with forward=True work with Tk
# backend, I changed the function signature but tried to keep
# it backward compatible. -JJL
# when a single parameter is given, consider it as a event
if height is None:
width = width.width
else:
self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
self.toolbar.configure(width=width)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
_focus = windowing.FocusManager()
if not self._shown:
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
self.canvas._tkcanvas.bind("<Destroy>", destroy)
self.window.deiconify()
# anim.py requires this
self.window.update()
else:
self.canvas.draw_idle()
self._shown = True
def destroy(self, *args):
if self.window is not None:
#self.toolbar.destroy()
if self.canvas._idle_callback:
self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
self.window.destroy()
if Gcf.get_num_fig_managers()==0:
if self.window is not None:
self.window.quit()
self.window = None
def set_window_title(self, title):
self.window.wm_title(title)
class AxisMenu:
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar(Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvas (wraps a Tk.Canvas)
      win - the Tk window
"""
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
xmin, xmax = canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
self.bLeft = self._Button(
text="Left", file="stock_left.ppm",
command=lambda x=-1: self.panx(x))
self.bRight = self._Button(
text="Right", file="stock_right.ppm",
command=lambda x=1: self.panx(x))
self.bZoomInX = self._Button(
text="ZoomInX",file="stock_zoom-in.ppm",
command=lambda x=1: self.zoomx(x))
self.bZoomOutX = self._Button(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=lambda x=-1: self.zoomx(x))
self.bUp = self._Button(
text="Up", file="stock_up.ppm",
command=lambda y=1: self.pany(y))
self.bDown = self._Button(
text="Down", file="stock_down.ppm",
command=lambda y=-1: self.pany(y))
self.bZoomInY = self._Button(
text="ZoomInY", file="stock_zoom-in.ppm",
command=lambda y=1: self.zoomy(y))
self.bZoomOutY = self._Button(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=lambda y=-1: self.zoomy(y))
self.bSave = self._Button(
text="Save", file="stock_save_as.ppm",
command=self.save_figure)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, direction):
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
def pany(self, direction):
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
def zoomx(self, direction):
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
def zoomy(self, direction):
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
def save_figure(self, *args):
fs = FileDialog.SaveFileDialog(master=self.window,
title='Save the figure')
try:
self.lastDir
except AttributeError:
self.lastDir = os.curdir
fname = fs.go(dir_or_file=self.lastDir) # , pattern="*.png")
if fname is None: # Cancel
return
self.lastDir = os.path.dirname(fname)
try:
self.canvas.print_figure(fname)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_tkpaint(msg)
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
if not hasattr(self, "omenu"):
self.set_active(range(naxes))
self.omenu = AxisMenu(master=self, naxes=naxes)
else:
self.omenu.adjust(naxes)
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
    Public attributes
      canvas - the FigureCanvas (wraps a Tk.Canvas)
      win - the Tk window
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command):
file = os.path.join(rcParams['datapath'], 'images', file)
im = Tk.PhotoImage(master=self, file=file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
self.bHome = self._Button( text="Home", file="home.ppm",
command=self.home)
self.bBack = self._Button( text="Back", file="back.ppm",
command = self.back)
self.bForward = self._Button(text="Forward", file="forward.ppm",
command = self.forward)
self.bPan = self._Button( text="Pan", file="move.ppm",
command = self.pan)
self.bZoom = self._Button( text="Zoom",
file="zoom_to_rect.ppm",
command = self.zoom)
self.bsubplot = self._Button( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots)
self.bsave = self._Button( text="Save", file="filesave.ppm",
command = self.save_figure)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self, *args):
from tkFileDialog import asksaveasfilename
from tkMessageBox import showerror
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH
#defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
fname = asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes = tk_filetypes,
defaultextension = defaultextension
)
if fname == "" or fname == ():
return
else:
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception, e:
showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
FigureManager = FigureManagerTkAgg
| gpl-3.0 |
themrmax/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a projection of
the data similar to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
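# Illustrative follow-up sketch (not part of the original example): when the
# data genuinely does not fit in memory, IncrementalPCA can be fed explicit
# batches through partial_fit instead of fit_transform. The batch size of 10
# used here is an arbitrary assumption for the small iris dataset.
ipca_stream = IncrementalPCA(n_components=n_components)
for batch in np.array_split(X, len(X) // 10):
    ipca_stream.partial_fit(batch)
X_ipca_stream = ipca_stream.transform(X)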
| bsd-3-clause |
wazeerzulfikar/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
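# Follow-up sketch (assumes the rfecv object fitted above): RFECV also exposes
# which of the 25 input features survived the elimination.
selected = [i for i, kept in enumerate(rfecv.support_) if kept]
print("Selected feature indices : %s" % selected)
print("Feature ranking (1 = selected) : %s" % list(rfecv.ranking_))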
| bsd-3-clause |
skrzym/monday-morning-quarterback | Application/Site/mmq/main/controllers.py | 1 | 7841 | <<<<<<< HEAD
from flask import Blueprint, render_template, request, url_for, jsonify
from config import mongo
import pandas as pd
import json
from bson import json_util
import retrieve_model as rmodel
from collections import Counter
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def index():
#mongo.db.visits.insert_one({"no":"way"})
#visits = mongo.db.visits.find_one()
#return str(visits)
return render_template('index.html')
@main.route('/predict/')
def get_started():
down_list = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
quarter_list = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
clock_list = [{'value':15,'name':'<15'}, {'value':14,'name':'<14'}, {'value':13,'name':'<13'},
{'value':12,'name':'<12'}, {'value':11,'name':'<11'}, {'value':10,'name':'<10'},
{'value':9,'name':'<9'}, {'value':8,'name':'<8'}, {'value':7,'name':'<7'},
{'value':6,'name':'<6'}, {'value':5,'name':'<5'}, {'value':4,'name':'<4'},
{'value':3,'name':'<3'}, {'value':2,'name':'<2'}, {'value':1,'name':'<1'}]
yards_list = [{'value':0,'name':'inches'}, {'value':1,'name':'1'},
{'value':2,'name':'2'}, {'value':3,'name':'3'}, {'value':4,'name':'4'},
{'value':5,'name':'5'}, {'value':6,'name':'6'}, {'value':7,'name':'7'},
{'value':8,'name':'8'}, {'value':9,'name':'9'}, {'value':10,'name':'10'},
{'value':11,'name':'11'}, {'value':12,'name':'12'}, {'value':13,'name':'13'},
{'value':14,'name':'14'}, {'value':15,'name':'15'}, {'value':16,'name':'16'},
{'value':17,'name':'17'}, {'value':18,'name':'18'}, {'value':19,'name':'19'},
{'value':20,'name':'20'}, {'value':21,'name':'21'}, {'value':22,'name':'22'},
{'value':23,'name':'23'}, {'value':24,'name':'24'}, {'value':25,'name':'25'}]
field_list = range(0,101,1)
score_list = range(0,61,1)
down_dict = [{'value':1,'name':'1st'},{'value':2,'name':'2nd'},{'value':3,'name':'3rd'},{'value':4,'name':'4th'}]
return render_template('predict.html',
=======
from flask import Blueprint, render_template, request, url_for
from config import mongo
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def index():
mongo.db.visits.insert_one({"foo":"bar"})
visits = mongo.db.visits.find_one()
return str(visits)
#return render_template('index.html')
@main.route('/getstarted/')
def get_started():
down_list = ['1st','2nd','3rd','4th']
quarter_list = ['1st','2nd','3rd','4th']
clock_list = ['> 15 min', '> 10 min', '> 5 min', '> 2 min', '< 2 min', '< 1 min']
yards_list = ['inches', 'goal', '1', '2', '3', '4', '5', '6', '7' ,'8', '9', '10', '> 10']
field_list = range(0,105,5)
score_list = range(-60,61,1)
return render_template('getstarted.html',
>>>>>>> master
down_list=down_list,
quarter_list=quarter_list,
clock_list=clock_list,
yards_list=yards_list,
field_list=field_list,
<<<<<<< HEAD
score_list=score_list,
down_dict=down_dict
)
@main.route('/results/', methods=['POST'])
def results():
=======
score_list=score_list
)
@main.route('/run/', methods=['POST'])
def run():
>>>>>>> master
down = request.form['down']
quarter = request.form['quarter']
clock = request.form['clock']
yards = request.form['yards']
field = request.form['field']
score = request.form['score']
<<<<<<< HEAD
sign = request.form['sign']
guess = request.form['guess']
score = str(int(score) * int(sign))
# Store scenario in mongodb
scenario = {
'down': int(down),
'quarter': int(quarter),
'clock': int(clock),
'yards': int(yards),
'field': int(field),
'score': int(score),
'guess': guess
}
# Insert the current user's guess into the DB
    print('Putting this into db:', scenario)
mongo.db.scenarios.insert_one(scenario)
# Pull User guesses from MongoDB
#scenarios = mongo.db.scenarios.find()
# Pull NFL Stats from MongoDB
#nflstats = mongo.db.nfldata.find()
guesses = {'pass':'Pass', 'run':'Run', 'punt':'Punt', 'fg':'Field Goal', 'kneel': 'QB Kneel'}
try:
return render_template('results.html',
guess_title = guesses[guess],
=======
guess = request.form['guess']
# Store scenario in mongodb
scenario = {
'down': down,
'quarter': quarter,
'clock': clock,
'yards': yards,
'field': field,
'score': score,
'guess': guess
}
mongo.db.scenarios.insert_one(scenario)
scenarios = mongo.db.scenarios.find()
try:
return render_template('results.html',
>>>>>>> master
down=down,
quarter=quarter,
clock=clock,
yards=yards,
field=field,
score=score,
guess=guess,
<<<<<<< HEAD
scenarios=[None],#scenarios,
nflstats=[None]#nflstats
)
except Exception as e:
return "Something went wrong..." + str(e)
@main.route('/stats/')
def tables():
title = 'Test Table'
title = rmodel.predict_proba(4,4,1,20,-1)
table = title
return render_template('stats.html', table=table, title=title)
@main.route('/data/guesses/')
def guessData():
guess = request.args.get('guess')
down = request.args.get('down')
quarter = request.args.get('quarter')
clock = request.args.get('clock')
yards = request.args.get('yards')
field = request.args.get('field')
score = request.args.get('score')
search_dict = request.args.to_dict()
for key in search_dict:
#if key != 'guess':
try:
search_dict[key] = int(search_dict[key])
except:
pass
print(search_dict)
s=[data['guess'] for data in mongo.db.scenarios.find(search_dict)]
options = ['pass', 'run', 'punt', 'fg', 'kneel']
count = {option:s.count(option) for option in options}
print(count)
return json.dumps(count, default=json_util.default)
@main.route('/data/nfl/')
def nflData():
playtype = request.args.get('PlayType')
down = request.args.get('down')
quarter = request.args.get('quarter')
clock = request.args.get('clock')
yards = request.args.get('yards')
field = request.args.get('field')
score = request.args.get('score')
search_dict = request.args.to_dict()
for key in search_dict:
if key != 'playtype':
try:
search_dict[key] = int(search_dict[key])
except:
pass
s=[data["PlayType"] for data in mongo.db.nfldata.find(search_dict)]
print(s)
options = ['pass', 'run', 'punt', 'fg', 'kneel']
count = {option:s.count(option) for option in options}
print(count)
return json.dumps(count, default=json_util.default)
@main.route('/api/predict/')
def apiPredict():
arg_dict = request.args.to_dict()
for key in arg_dict:
try:
arg_dict[key] = int(arg_dict[key])
except:
pass
calculations = [
{name:rmodel.predict_group_proba(
arg_dict['quarter'],
arg_dict['down'],
arg_dict['yards'],
arg_dict['clock'],
arg_dict['field'],
arg_dict['score'],
name)
} for name in ['quarter', 'down', 'yards', 'timeunder', 'yrdline100', 'scorediff']
]
calculations.append({'request':rmodel.predict_proba(
arg_dict['quarter'],
arg_dict['down'],
arg_dict['yards'],
arg_dict['clock'],
arg_dict['field'],
arg_dict['score'],
False)
})
return jsonify(calculations)
=======
scenarios=scenarios
)
except:
return "fail"
>>>>>>> master
| mit |
ky822/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 71 | 25104 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
MarkRegalla27/ThinkStats2 | code/populations.py | 68 | 2609 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import csv
import logging
import sys
import numpy as np
import pandas
import thinkplot
import thinkstats2
def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'):
"""Reads filename and returns populations in thousands
filename: string
returns: pandas Series of populations in thousands
"""
df = pandas.read_csv(filename, header=None, skiprows=2,
encoding='iso-8859-1')
populations = df[7]
populations.replace(0, np.nan, inplace=True)
return populations.dropna()
def MakeFigures():
"""Plots the CDF of populations in several forms.
On a log-log scale the tail of the CCDF looks like a straight line,
which suggests a Pareto distribution, but that turns out to be misleading.
On a log-x scale the distribution has the characteristic sigmoid of
a lognormal distribution.
The normal probability plot of log(sizes) confirms that the data fit the
lognormal model very well.
Many phenomena that have been described with Pareto models can be described
as well, or better, with lognormal models.
"""
pops = ReadData()
print('Number of cities/towns', len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label='data')
cdf_log = thinkstats2.Cdf(log_pops, label='data')
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(xlabel='log10 population',
ylabel='CCDF',
yscale='log')
thinkplot.Save(root='populations_pareto')
# lognormal plot
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel='log10 population',
ylabel='CDF')
thinkplot.SubPlot(2)
thinkstats2.NormalProbabilityPlot(log_pops, label='data')
thinkplot.Config(xlabel='z',
ylabel='log10 population',
xlim=[-5, 5])
thinkplot.Save(root='populations_normal')
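def LognormalSanityCheck(n=10000, seed=17):
    """Illustrative sketch (not called by main): draws synthetic lognormal
    samples and summarizes their log10 values, mirroring the reasoning in
    MakeFigures. The mean and sigma used here are arbitrary assumptions,
    not estimates from the census data.
    n: number of samples
    seed: random seed
    returns: (mean, std) of the log10 values
    """
    np.random.seed(seed)
    samples = np.random.lognormal(mean=8.0, sigma=1.5, size=n)
    log_samples = np.log10(samples)
    # if the samples are lognormal, their logs should be well described
    # by a normal distribution with these two parameters
    return log_samples.mean(), log_samples.std()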
def main():
thinkstats2.RandomSeed(17)
MakeFigures()
if __name__ == "__main__":
main()
| gpl-3.0 |
kevin-intel/scikit-learn | sklearn/neighbors/tests/test_neighbors_pipeline.py | 3 | 8295 | """
This tests the equivalence between estimators that compute nearest neighbors
internally and the corresponding pipeline versions in which
KNeighborsTransformer or RadiusNeighborsTransformer precomputes the
neighbors.
"""
import numpy as np
import pytest
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.datasets import make_blobs
from sklearn.pipeline import make_pipeline
from sklearn.base import clone
from sklearn.neighbors import KNeighborsTransformer
from sklearn.neighbors import RadiusNeighborsTransformer
from sklearn.cluster import DBSCAN
from sklearn.cluster import SpectralClustering
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neighbors import RadiusNeighborsRegressor
from sklearn.neighbors import LocalOutlierFactor
from sklearn.manifold import SpectralEmbedding
from sklearn.manifold import Isomap
from sklearn.manifold import TSNE
def test_spectral_clustering():
# Test chaining KNeighborsTransformer and SpectralClustering
n_neighbors = 5
X, _ = make_blobs(random_state=0)
# compare the chained version and the compact version
est_chain = make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, mode='connectivity'),
SpectralClustering(n_neighbors=n_neighbors, affinity='precomputed',
random_state=42))
est_compact = SpectralClustering(
n_neighbors=n_neighbors, affinity='nearest_neighbors', random_state=42)
labels_compact = est_compact.fit_predict(X)
labels_chain = est_chain.fit_predict(X)
assert_array_almost_equal(labels_chain, labels_compact)
def test_spectral_embedding():
# Test chaining KNeighborsTransformer and SpectralEmbedding
n_neighbors = 5
n_samples = 1000
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
# compare the chained version and the compact version
est_chain = make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, mode='connectivity'),
SpectralEmbedding(n_neighbors=n_neighbors, affinity='precomputed',
random_state=42))
est_compact = SpectralEmbedding(
n_neighbors=n_neighbors, affinity='nearest_neighbors', random_state=42)
St_compact = est_compact.fit_transform(S)
St_chain = est_chain.fit_transform(S)
assert_array_almost_equal(St_chain, St_compact)
def test_dbscan():
# Test chaining RadiusNeighborsTransformer and DBSCAN
radius = 0.3
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
# compare the chained version and the compact version
est_chain = make_pipeline(
RadiusNeighborsTransformer(radius=radius, mode='distance'),
DBSCAN(metric='precomputed', eps=radius))
est_compact = DBSCAN(eps=radius)
labels_chain = est_chain.fit_predict(X)
labels_compact = est_compact.fit_predict(X)
assert_array_almost_equal(labels_chain, labels_compact)
def test_isomap():
# Test chaining KNeighborsTransformer and Isomap with
# neighbors_algorithm='precomputed'
algorithm = 'auto'
n_neighbors = 10
X, _ = make_blobs(random_state=0)
X2, _ = make_blobs(random_state=1)
# compare the chained version and the compact version
est_chain = make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, algorithm=algorithm,
mode='distance'),
Isomap(n_neighbors=n_neighbors, metric='precomputed'))
est_compact = Isomap(n_neighbors=n_neighbors,
neighbors_algorithm=algorithm)
Xt_chain = est_chain.fit_transform(X)
Xt_compact = est_compact.fit_transform(X)
assert_array_almost_equal(Xt_chain, Xt_compact)
Xt_chain = est_chain.transform(X2)
Xt_compact = est_compact.transform(X2)
assert_array_almost_equal(Xt_chain, Xt_compact)
# TODO: Remove filterwarning in 1.2
@pytest.mark.filterwarnings("ignore:.*TSNE will change.*:FutureWarning")
def test_tsne():
# Test chaining KNeighborsTransformer and TSNE
n_iter = 250
perplexity = 5
n_neighbors = int(3. * perplexity + 1)
rng = np.random.RandomState(0)
X = rng.randn(20, 2)
for metric in ['minkowski', 'sqeuclidean']:
# compare the chained version and the compact version
est_chain = make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance',
metric=metric),
TSNE(metric='precomputed', perplexity=perplexity,
method="barnes_hut", random_state=42, n_iter=n_iter,
square_distances=True))
est_compact = TSNE(metric=metric, perplexity=perplexity, n_iter=n_iter,
method="barnes_hut", random_state=42,
square_distances=True)
Xt_chain = est_chain.fit_transform(X)
Xt_compact = est_compact.fit_transform(X)
assert_array_almost_equal(Xt_chain, Xt_compact)
def test_lof_novelty_false():
# Test chaining KNeighborsTransformer and LocalOutlierFactor
n_neighbors = 4
rng = np.random.RandomState(0)
X = rng.randn(40, 2)
# compare the chained version and the compact version
est_chain = make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance'),
LocalOutlierFactor(metric='precomputed', n_neighbors=n_neighbors,
novelty=False, contamination="auto"))
est_compact = LocalOutlierFactor(n_neighbors=n_neighbors, novelty=False,
contamination="auto")
pred_chain = est_chain.fit_predict(X)
pred_compact = est_compact.fit_predict(X)
assert_array_almost_equal(pred_chain, pred_compact)
def test_lof_novelty_true():
# Test chaining KNeighborsTransformer and LocalOutlierFactor
n_neighbors = 4
rng = np.random.RandomState(0)
X1 = rng.randn(40, 2)
X2 = rng.randn(40, 2)
# compare the chained version and the compact version
est_chain = make_pipeline(
KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance'),
LocalOutlierFactor(metric='precomputed', n_neighbors=n_neighbors,
novelty=True, contamination="auto"))
est_compact = LocalOutlierFactor(n_neighbors=n_neighbors, novelty=True,
contamination="auto")
pred_chain = est_chain.fit(X1).predict(X2)
pred_compact = est_compact.fit(X1).predict(X2)
assert_array_almost_equal(pred_chain, pred_compact)
def test_kneighbors_regressor():
# Test chaining KNeighborsTransformer and classifiers/regressors
rng = np.random.RandomState(0)
X = 2 * rng.rand(40, 5) - 1
X2 = 2 * rng.rand(40, 5) - 1
y = rng.rand(40, 1)
n_neighbors = 12
radius = 1.5
# We precompute more neighbors than necessary, to have equivalence between
# k-neighbors estimator after radius-neighbors transformer, and vice-versa.
factor = 2
k_trans = KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance')
k_trans_factor = KNeighborsTransformer(n_neighbors=int(
n_neighbors * factor), mode='distance')
r_trans = RadiusNeighborsTransformer(radius=radius, mode='distance')
r_trans_factor = RadiusNeighborsTransformer(radius=int(
radius * factor), mode='distance')
k_reg = KNeighborsRegressor(n_neighbors=n_neighbors)
r_reg = RadiusNeighborsRegressor(radius=radius)
test_list = [
(k_trans, k_reg),
(k_trans_factor, r_reg),
(r_trans, r_reg),
(r_trans_factor, k_reg),
]
for trans, reg in test_list:
# compare the chained version and the compact version
reg_compact = clone(reg)
reg_precomp = clone(reg)
reg_precomp.set_params(metric='precomputed')
reg_chain = make_pipeline(clone(trans), reg_precomp)
y_pred_chain = reg_chain.fit(X, y).predict(X2)
y_pred_compact = reg_compact.fit(X, y).predict(X2)
assert_array_almost_equal(y_pred_chain, y_pred_compact)
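def _example_precomputed_pipeline():
    # Illustrative sketch, not a test: the chained-vs-compact pattern exercised
    # throughout this file, written out once for a single regressor. The data
    # shapes and n_neighbors value are arbitrary assumptions.
    rng = np.random.RandomState(0)
    X = rng.rand(40, 5)
    X2 = rng.rand(10, 5)
    y = rng.rand(40)
    n_neighbors = 5
    est_chain = make_pipeline(
        KNeighborsTransformer(n_neighbors=n_neighbors, mode='distance'),
        KNeighborsRegressor(n_neighbors=n_neighbors, metric='precomputed'))
    est_compact = KNeighborsRegressor(n_neighbors=n_neighbors)
    # Both estimators should produce (nearly) identical predictions.
    return est_chain.fit(X, y).predict(X2), est_compact.fit(X, y).predict(X2)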
| bsd-3-clause |
jereze/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
Garrett-R/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
suecharo/pyHSICLasso | pyHSICLasso/input_data.py | 1 | 1239 | #!/usr/bin/env python
# coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from future import standard_library
from scipy import io as spio
standard_library.install_aliases()
def input_csv_file(file_name):
df = pd.read_csv(file_name, sep=",")
X_in = df.ix[:, 1:].as_matrix().T
Y_in = df.ix[:, 0].as_matrix().reshape(1, len(df.index))
return X_in, Y_in
def input_tsv_file(file_name):
df = pd.read_csv(file_name, sep="\t")
X_in = df.ix[:, 1:].as_matrix().T
Y_in = df.ix[:, 0].as_matrix().reshape(1, len(df.index))
return X_in, Y_in
def input_matlab_file(file_name):
data = spio.loadmat(file_name)
if "X" in data.keys() and "Y" in data.keys():
X_in = data["X"]
Y_in = data["Y"]
elif "X_in" in data.keys() and "Y_in" in data.keys():
X_in = data["X_in"]
Y_in = data["Y_in"]
elif "x" in data.keys() and "y" in data.keys():
X_in = data["x"]
Y_in = data["y"]
elif "x_in" in data.keys() and "y_in" in data.keys():
X_in = data["x_in"]
Y_in = data["y_in"]
else:
        raise KeyError("could not find input data")
return X_in, Y_in
| mit |
vsl9/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
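if __name__ == "__main__":
    # Minimal usage sketch with synthetic data (an assumption, not taken from
    # the book): 0-1 outcomes drawn from the plotted probabilities.
    np.random.seed(42)
    p = np.random.uniform(0, 1, size=200)
    y = np.random.binomial(1, p)
    separation_plot(p, y)
    plt.show()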
| mit |
cl4rke/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
CG-F16-4-Rutgers/steersuite-rutgers | steerstats/tools/plotting/plot3ObjectiveCurve.py | 8 | 1861 |
import csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import sys
import scipy
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
import numpy as np
# filename = '../../data/optimization/sf/multiObjective/SteerStatsOpt2.csv'
filename = sys.argv[1]
xs = []
ys = []
zs = []
if len(sys.argv) == 2:
csvfile = open(filename, 'r')
spamreader = csv.reader(csvfile, delimiter=',')
xs = []
ys = []
for row in spamreader:
xs.append(float(row[0]))
ys.append(float(row[1]))
zs.append(float(row[2]))
elif len(sys.argv) == 3:
for i in range(1, int(sys.argv[2])):
tmp_filename = filename + str(i) + ".log"
csvfile = open(tmp_filename, 'r')
spamreader = csv.reader(csvfile, delimiter=',')
for row in spamreader:
xs.append(float(row[0]))
ys.append(float(row[1]))
zs.append(float(row[2]))
# print "xs = " + str(xs)
# print "ys = " + str(ys)
# print "zs = " + str(zs)
fig = plt.figure()
x_min = np.amin(xs)
x_max = np.amax(xs)
y_min = np.amin(ys)
y_max = np.amax(ys)
z_min = np.amin(zs)
z_max = np.amax(zs)
new_xs = (xs - x_min) / (x_max - x_min)
new_ys = (ys - y_min) / (y_max - y_min)
new_zs = (zs - z_min) / (z_max - z_min)
ax = fig.add_subplot(111, projection='3d')
# ax = fig.gca(projection='3d')
# ax.plot_wireframe(xs, ys, zs, rstride=1, cstride=1)
print(new_xs + new_ys + new_zs)
ax.plot_trisurf(new_xs, new_ys, new_zs, cmap=cm.jet, linewidth=0.2)
# ax.plot_trisurf(tri[:,0], tri[:,1], tri[:,2], linewidth=0.2)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_zlim([0.0, 1.0])
ax.set_xlabel('Efficiency Metric')
ax.set_ylabel('PLE Metric')
ax.set_zlabel('Entropy Metric')
# ax.set_title("Multi-Objective Optimization")
# plt.axis("tight")
plt.show()
| gpl-3.0 |
robovm/robovm-studio | python/helpers/pydev/pydev_ipython/qt_for_kernel.py | 67 | 2337 | """ Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
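# Hedged usage sketch (not part of the original module): kernel code is expected
# to import the resolved binding from this shim instead of importing PyQt4 or
# PySide directly, for example:
#
#     from pydev_ipython.qt_for_kernel import QtCore, QtGui, QT_API
#     app = QtGui.QApplication([])   # works for PyQt4 v1/v2 and PySide alike
#     print(QT_API)                  # which binding was actually loaded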
| apache-2.0 |
2uller/LotF | App/Lib/site-packages/numpy/lib/function_base.py | 3 | 115310 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
'add_newdoc_ufunc']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
from arraysetops import setdiff1d
from utils import deprecate
from _compiled_base import add_newdoc_ufunc
import numpy as np
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
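    # Process the data in blocks of 65536 samples to bound temporary memory:
    # each block is sorted and searchsorted against the bin edges, accumulating
    # cumulative counts (or cumulative weights) that np.diff turns into per-bin
    # totals after the loop.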
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
        the bin density, i.e., the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError("Element at index %s in `bins` should be "
"a positive integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError("""
Found bin edge of size <= 0. Did you specify `bins` with
non-monotonic sequence?""")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
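    # Flatten the D-dimensional bin indices into one linear index using a
    # mixed-radix scheme over the axes ordered by `ni`; a single bincount on
    # this linear index then yields the occupancy of every (outlier-padded) bin.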
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
        ``piecewise(..., ..., alpha=1)``, then each function is called as
        ``f(x, alpha=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
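    # Build an integer selector S: for each element it holds the 1-based index
    # of the first condition that is True (pfac drops to zero once an earlier
    # condition fired) and 0 where none holds, which selects `default`.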
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M' :
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm' :
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
for axis in range(N):
# select out appropriate parts for this dimension
out = np.empty_like(f, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
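    # Wrap each first difference into [-pi, pi); +pi is restored where the raw
    # jump was positive so a jump of exactly pi keeps its sign.  The correction
    # is the gap between wrapped and raw differences, zeroed below `discont`.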
ddmod = mod(dd+pi, 2*pi)-pi
_nx.copyto(ddmod, pi, where=(ddmod==-pi) & (dd > 0))
    ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd)<discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
dt = y.dtype
if np.issubdtype(dt, np.integer) or np.issubdtype(dt, np.bool_):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.copyto(y, fill, where=mask)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
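# Hedged illustration (not part of the original source): the nan-aware
# reductions below delegate to _nanop, e.g. nansum fills NaNs with 0 before
# summing, so
#     _nanop(np.sum, 0, np.array([1.0, np.nan, 2.0]))
# evaluates to 3.0.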
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity are present the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
Arithmetic is modular when using integer types (all elements of `a` must
be finite i.e. no elements that are NaNs, positive infinity and negative
infinity because NaNs are floating point types), and no error is raised
on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
        Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmin.reduce(a, axis)
else:
return np.fmin.reduce(a.flat)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
        returned, with the same dtype as `a`.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmax.reduce(a, axis)
else:
return np.fmax.reduce(a.flat)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the first
argument will be used to determine the number of outputs. The results of
this call will be cached if `cache` is `True` to prevent calling the
function twice. However, to implement the cache, the original function must
be wrapped which will slow down subsequent calls, so only do this if your
function is expensive.
    Support for the new keyword argument interface and the `excluded` argument
    further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if self.otypes and not self.excluded:
self._ufunc = None # Caching to improve default performance
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
assert args
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple functions
# at least -- this wrapping can almost double the execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
if c.size == 0:
# handle empty arrays
return c
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
def blackman(M):
"""
Return the Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/(M-1)) + 0.08 \\cos(4\\pi n/(M-1))
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
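# Illustrative sanity check: for odd M the window peaks at exactly
# 0.42 + 0.5 + 0.08 = 1.0 at the centre sample, and the endpoints are
# numerically zero.
#
#   >>> np.allclose(np.blackman(5), [0.0, 0.34, 1.0, 0.34, 0.0])
#   True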
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means"removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. Some authors prefer that it be called a
Hann window, to help avoid confusion with the very similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
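# Illustrative sanity check: the Hann window is zero at both endpoints and
# reaches exactly 1.0 at the centre for odd M.
#
#   >>> np.allclose(np.hanning(5), [0.0, 0.5, 1.0, 0.5, 0.0])
#   True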
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming window was named for R. W. Hamming, an associate of J. W. Tukey, and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
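# Evaluate a Chebyshev series at x with Clenshaw's backward recurrence; the
# coefficient lists _i0A/_i0B above are the Cephes expansions used by i0 below.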
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
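# i0 is an even function, so evaluate at |x|; the domain is then split at x = 8
# to match the two Chebyshev expansions (_i0A on [0, 8], _i0B on (8, inf)).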
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser window was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
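# Illustrative sanity check: beta = 0 makes the Bessel ratio identically 1,
# i.e. a rectangular window, matching the first row of the beta table above.
#
#   >>> np.allclose(np.kaiser(8, 0.0), np.ones(8))
#   True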
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi* where(x == 0, 1.0e-20, x)
return sin(y)/y
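# Illustrative sanity check: the zero argument is replaced by a tiny value
# above, giving the limit sinc(0) = 1 to machine precision, while the first
# zero crossing sits at x = 1.
#
#   >>> np.allclose([np.sinc(0.0), np.sinc(1.0)], [1.0, 0.0])
#   True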
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if sorted.shape == ():
# make 0-D arrays work
return sorted.item()
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
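# Illustrative sketch of the odd/even handling above: for odd N the single
# middle value is returned, for even N the mean of the two middle values, in
# both cases coerced to float by `mean`.
#
#   >>> np.median([1, 3, 5])
#   3.0
#   >>> np.median([1, 3, 5, 7])
#   4.0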
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute, which must be between 0 and 100 inclusive.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentile along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
`percentile`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest
neighbors is used if the normalized ranking does not match q exactly.
The same as the median if ``q=50``, the same as the minimum if ``q=0``
and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
raise ValueError("percentile must be either in the range [0,100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
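# Illustrative sketch of the interpolation above: a q between two order
# statistics yields a weighted average of the two nearest neighbours, e.g. the
# 25th percentile of [1, 2, 3, 4] falls at fractional index 0.75, giving
# 0.25*1 + 0.75*2.
#
#   >>> np.percentile([1, 2, 3, 4], 25)
#   1.75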
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates the trapezoidal rule: the y-axis locations of the
points are taken from the `y` array; by default the x-axis distances between
points are 1.0, but they can instead be given via the `x` array or the `dx`
scalar. The return value equals the combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
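# y[slice1] and y[slice2] are y[1:] and y[:-1] along `axis`, so the expression
# below is sum(d * (y_left + y_right) / 2), i.e. the composite trapezoidal rule.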
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = {}
exec 'from %s import %s' % (place, obj) in new
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from two or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
copy : bool, optional
If False, views into the original arrays are returned in
order to conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous arrays.
Furthermore, more than one element of a broadcast array may refer to
a single memory location. If you need to write to the arrays, make
copies first.
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing keyword
argument. Giving the string 'ij' returns a meshgrid with matrix indexing,
while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case
with inputs of length M and N, the outputs are of shape (N, M) for 'xy'
indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of
length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M,
N, P) for 'ij' indexing. The difference is illustrated by the following
code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
if len(xi) < 2:
msg = 'meshgrid() takes 2 or more arguments (%d given)' % int(len(xi) > 0)
raise ValueError(msg)
args = np.atleast_1d(*xi)
ndim = len(args)
copy_ = kwargs.get('copy', True)
sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy')
if not indexing in ['xy', 'ij']:
raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
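# Cartesian ('xy') indexing swaps the first two axes relative to 'ij', so
# inputs of length M and N produce outputs of shape (N, M, ...).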
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >= N):
raise ValueError(
"invalid entry")
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
# copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
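# `obj` now holds the indices within [start, stop) that survive the deletion;
# fancy indexing below copies them into the contiguous middle of `new`.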
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
values = array(values, copy=False, ndmin=arr.ndim)
values = np.rollaxis(values, 0, axis+1)
obj = [obj] * values.shape[axis]
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| gpl-2.0 |
yunque/sms-tools | lectures/05-Sinusoidal-model/plots-code/synthesis-window.py | 22 | 1725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N/2
Ns = 512
hNs = Ns/2
H = Ns/4
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
sw = np.zeros(Ns)
ow = triang(2*H)
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns)
bh = bh / sum(bh)
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
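# sw is the overlap-add synthesis window: a triangular window of length 2*H
# divided by the Blackman-Harris analysis window, so the two cancel over the
# central Ns/2 samples (cf. the "yw = y * triangular / Blackman Harris" plot below).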
plt.figure(1, figsize=(9, 6))
plt.subplot(3,1,1)
plt.plot(np.arange(hNs), mY, 'r', lw=1.5)
plt.axis([0, hNs,-90,max(mY)+2])
plt.title("mY, Blackman-Harris, Ns = 512")
plt.subplot(3,1,2)
plt.plot(np.arange(-hNs,hNs), y, 'b', lw=1.5)
plt.plot(np.arange(-hNs,hNs), max(y)*bh/max(bh), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)+.1])
plt.title("y, size = Ns = 512 (Blackman-Harris window)")
yw = y * sw / max(sw)
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs,hNs), yw, 'b',lw=1.5)
plt.plot(np.arange(-hNs/2,hNs/2), max(y)*ow/max(ow), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(yw),max(yw)+.1])
plt.title("yw = y * triangular / Blackman Harris; size = Ns/2 = 256")
plt.tight_layout()
plt.savefig('synthesis-window.png')
plt.show()
| agpl-3.0 |
thunderhoser/GewitterGefahr | gewittergefahr/prediction_paper_2019/make_extreme_myrorss_figure.py | 1 | 18109 | """Makes figure with PMM composites of extreme examples for MYRORSS model.
PMM = probability-matched means
"Extreme examples" include best hits, best correct nulls, worst misses, worst
false alarms, high-probability examples (regardless of true label), and
low-probability examples (regardless of true label).
"""
import pickle
import os.path
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot
from gewittergefahr.gg_utils import general_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import saliency_maps
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import saliency_plotting
from gewittergefahr.plotting import imagemagick_utils
from gewittergefahr.scripts import plot_input_examples as plot_examples
REFL_HEIGHTS_M_AGL = numpy.array([2000, 6000, 10000], dtype=int)
MODEL_FILE_KEY = model_interpretation.MODEL_FILE_KEY
MEAN_PREDICTOR_MATRICES_KEY = model_interpretation.MEAN_PREDICTOR_MATRICES_KEY
MEAN_SOUNDING_PRESSURES_KEY = model_interpretation.MEAN_SOUNDING_PRESSURES_KEY
COLOUR_BAR_LENGTH = 0.25
PANEL_NAME_FONT_SIZE = 30
COLOUR_BAR_FONT_SIZE = 25
SOUNDING_FONT_SIZE = 30
CONVERT_EXE_NAME = '/usr/bin/convert'
TITLE_FONT_SIZE = 150
TITLE_FONT_TYPE = 'DejaVu-Sans-Bold'
SALIENCY_COLOUR_MAP_OBJECT = pyplot.get_cmap('binary')
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
INPUT_FILES_ARG_NAME = 'input_composite_file_names'
COMPOSITE_NAMES_ARG_NAME = 'composite_names'
PLOT_SALIENCY_ARG_NAME = 'plot_saliency'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
INPUT_FILES_HELP_STRING = (
'List of paths to input files. Each should contain a PMM composite over '
'many examples (storm objects). Specifically, each should be a Pickle file'
' with one dictionary, containing the keys "{0:s}" and "{1:s}".'
).format(MEAN_PREDICTOR_MATRICES_KEY, MODEL_FILE_KEY)
COMPOSITE_NAMES_HELP_STRING = (
'List of PMM-composite names (one per input file). The list should be '
'space-separated. In each list item, underscores will be replaced with '
'spaces.'
)
PLOT_SALIENCY_HELP_STRING = (
'Boolean flag. If 1, will plot saliency on top of radar fields.'
)
OUTPUT_DIR_HELP_STRING = (
'Name of output directory (figures will be saved here).'
)
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + INPUT_FILES_ARG_NAME, type=str, nargs='+', required=True,
help=INPUT_FILES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + COMPOSITE_NAMES_ARG_NAME, type=str, nargs='+', required=True,
help=COMPOSITE_NAMES_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + PLOT_SALIENCY_ARG_NAME, type=int, required=False, default=0,
help=PLOT_SALIENCY_HELP_STRING
)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING
)
def _smooth_maps(saliency_matrices, smoothing_radius_grid_cells):
"""Smooths saliency maps via Gaussian filter.
T = number of input tensors to the model
:param saliency_matrices: length-T list of numpy arrays.
:param smoothing_radius_grid_cells: e-folding radius (number of grid cells).
:return: saliency_matrices: Smoothed version of input.
"""
print((
'Smoothing saliency maps with Gaussian filter (e-folding radius of '
'{0:.1f} grid cells)...'
).format(
smoothing_radius_grid_cells
))
num_matrices = len(saliency_matrices)
num_examples = saliency_matrices[0].shape[0]
for j in range(num_matrices):
this_num_channels = saliency_matrices[j].shape[-1]
for i in range(num_examples):
for k in range(this_num_channels):
saliency_matrices[j][i, ..., k] = (
general_utils.apply_gaussian_filter(
input_matrix=saliency_matrices[j][i, ..., k],
e_folding_radius_grid_cells=smoothing_radius_grid_cells
)
)
return saliency_matrices
def _read_composite(pickle_file_name, read_saliency):
"""Reads PMM composite of examples (storm objects) from Pickle file.
T = number of input tensors to model
H_s = number of sounding heights
:param pickle_file_name: Path to input file.
:param read_saliency: Boolean flag.
:return: mean_predictor_matrices: length-T of numpy arrays, where the [i]th
item has dimensions of the [i]th input tensor to the model.
:return: model_metadata_dict: Dictionary returned by
`cnn.read_model_metadata`.
:return: mean_sounding_pressures_pa: numpy array (length H_s) of
sounding pressures.
:return: mean_saliency_matrices: Same as `mean_predictor_matrices` but with
saliency values.
"""
print('Reading data from: "{0:s}"...'.format(pickle_file_name))
file_handle = open(pickle_file_name, 'rb')
composite_dict = pickle.load(file_handle)
file_handle.close()
mean_predictor_matrices = composite_dict[MEAN_PREDICTOR_MATRICES_KEY]
mean_sounding_pressures_pa = composite_dict[MEAN_SOUNDING_PRESSURES_KEY]
for i in range(len(mean_predictor_matrices)):
mean_predictor_matrices[i] = numpy.expand_dims(
mean_predictor_matrices[i], axis=0
)
model_file_name = composite_dict[MODEL_FILE_KEY]
model_metafile_name = '{0:s}/model_metadata.p'.format(
os.path.split(model_file_name)[0]
)
print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.UPSAMPLE_REFLECTIVITY_KEY
] = False
all_refl_heights_m_agl = model_metadata_dict[
cnn.TRAINING_OPTION_DICT_KEY
][trainval_io.RADAR_HEIGHTS_KEY]
good_flags = numpy.array(
[h in REFL_HEIGHTS_M_AGL for h in all_refl_heights_m_agl], dtype=bool
)
good_indices = numpy.where(good_flags)[0]
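# Subset the reflectivity predictor to the heights in REFL_HEIGHTS_M_AGL
# (2, 6 and 10 km AGL) and update the model metadata below to match.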
mean_predictor_matrices[0] = (
mean_predictor_matrices[0][..., good_indices, :]
)
model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.RADAR_HEIGHTS_KEY
] = REFL_HEIGHTS_M_AGL
if not read_saliency:
return (
mean_predictor_matrices, model_metadata_dict,
mean_sounding_pressures_pa, None
)
mean_saliency_matrices = composite_dict[
saliency_maps.MEAN_SALIENCY_MATRICES_KEY
]
for i in range(len(mean_saliency_matrices)):
mean_saliency_matrices[i] = numpy.expand_dims(
mean_saliency_matrices[i], axis=0
)
mean_saliency_matrices = _smooth_maps(
saliency_matrices=mean_saliency_matrices, smoothing_radius_grid_cells=2
)
mean_saliency_matrices[0] = mean_saliency_matrices[0][..., good_indices, :]
return (
mean_predictor_matrices, model_metadata_dict,
mean_sounding_pressures_pa, mean_saliency_matrices
)
def _overlay_text(
image_file_name, x_offset_from_center_px, y_offset_from_top_px,
text_string):
"""Overlays text on image.
:param image_file_name: Path to image file.
:param x_offset_from_center_px: Center-relative x-coordinate (pixels).
:param y_offset_from_top_px: Top-relative y-coordinate (pixels).
:param text_string: String to overlay.
:raises: ValueError: if ImageMagick command (which is ultimately a Unix
command) fails.
"""
command_string = (
'"{0:s}" "{1:s}" -gravity north -pointsize {2:d} -font "{3:s}" '
'-fill "rgb(0, 0, 0)" -annotate {4:+d}{5:+d} "{6:s}" "{1:s}"'
).format(
CONVERT_EXE_NAME, image_file_name, TITLE_FONT_SIZE, TITLE_FONT_TYPE,
x_offset_from_center_px, y_offset_from_top_px, text_string
)
exit_code = os.system(command_string)
if exit_code == 0:
return
raise ValueError(imagemagick_utils.ERROR_STRING)
def _plot_composite(
composite_file_name, composite_name_abbrev, composite_name_verbose,
plot_saliency, output_dir_name):
"""Plots one composite.
:param composite_file_name: Path to input file. Will be read by
`_read_composite`.
:param composite_name_abbrev: Abbreviated name for composite. Will be used
in names of output files.
:param composite_name_verbose: Verbose name for composite. Will be used as
figure title.
:param plot_saliency: See documentation at top of file.
:param output_dir_name: Path to output directory. Figures will be saved
here.
:return: radar_figure_file_name: Path to file with radar figure for this
composite.
:return: sounding_figure_file_name: Path to file with sounding figure for
this composite.
"""
(
mean_predictor_matrices, model_metadata_dict,
mean_sounding_pressures_pa, mean_saliency_matrices
) = _read_composite(
pickle_file_name=composite_file_name, read_saliency=plot_saliency
)
refl_heights_m_agl = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.RADAR_HEIGHTS_KEY
]
num_refl_heights = len(refl_heights_m_agl)
handle_dict = plot_examples.plot_one_example(
list_of_predictor_matrices=mean_predictor_matrices,
model_metadata_dict=model_metadata_dict,
pmm_flag=True, plot_sounding=True,
sounding_pressures_pascals=mean_sounding_pressures_pa,
allow_whitespace=True, plot_panel_names=True,
panel_name_font_size=PANEL_NAME_FONT_SIZE,
add_titles=False, label_colour_bars=True,
colour_bar_length=COLOUR_BAR_LENGTH,
colour_bar_font_size=COLOUR_BAR_FONT_SIZE,
sounding_font_size=SOUNDING_FONT_SIZE, num_panel_rows=num_refl_heights)
if plot_saliency:
axes_object_matrices = handle_dict[plot_examples.RADAR_AXES_KEY]
all_saliency_values = numpy.concatenate((
numpy.ravel(mean_saliency_matrices[0]),
numpy.ravel(mean_saliency_matrices[1])
))
max_contour_value = numpy.percentile(
numpy.absolute(all_saliency_values), 99
)
this_matrix = numpy.flip(
mean_saliency_matrices[0][0, ..., 0], axis=0
)
saliency_plotting.plot_many_2d_grids_with_contours(
saliency_matrix_3d=this_matrix,
axes_object_matrix=axes_object_matrices[0],
colour_map_object=SALIENCY_COLOUR_MAP_OBJECT,
max_absolute_contour_level=max_contour_value,
contour_interval=max_contour_value / 10,
row_major=True
)
this_matrix = numpy.flip(
mean_saliency_matrices[1][0, ...], axis=0
)
saliency_plotting.plot_many_2d_grids_with_contours(
saliency_matrix_3d=this_matrix,
axes_object_matrix=axes_object_matrices[1],
colour_map_object=SALIENCY_COLOUR_MAP_OBJECT,
max_absolute_contour_level=max_contour_value,
contour_interval=max_contour_value / 10,
row_major=False
)
sounding_figure_object = handle_dict[plot_examples.SOUNDING_FIGURE_KEY]
sounding_figure_file_name = '{0:s}/{1:s}_sounding.jpg'.format(
output_dir_name, composite_name_abbrev
)
print('Saving figure to: "{0:s}"...'.format(sounding_figure_file_name))
sounding_figure_object.savefig(
sounding_figure_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(sounding_figure_object)
imagemagick_utils.resize_image(
input_file_name=sounding_figure_file_name,
output_file_name=sounding_figure_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX
)
imagemagick_utils.trim_whitespace(
input_file_name=sounding_figure_file_name,
output_file_name=sounding_figure_file_name,
border_width_pixels=TITLE_FONT_SIZE + 25
)
_overlay_text(
image_file_name=sounding_figure_file_name,
x_offset_from_center_px=0, y_offset_from_top_px=0,
text_string=composite_name_verbose
)
imagemagick_utils.trim_whitespace(
input_file_name=sounding_figure_file_name,
output_file_name=sounding_figure_file_name,
border_width_pixels=10
)
refl_figure_object = handle_dict[plot_examples.RADAR_FIGURES_KEY][0]
refl_figure_file_name = '{0:s}/{1:s}_reflectivity.jpg'.format(
output_dir_name, composite_name_abbrev
)
print('Saving figure to: "{0:s}"...'.format(refl_figure_file_name))
refl_figure_object.savefig(
refl_figure_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(refl_figure_object)
shear_figure_object = handle_dict[plot_examples.RADAR_FIGURES_KEY][1]
shear_figure_file_name = '{0:s}/{1:s}_shear.jpg'.format(
output_dir_name, composite_name_abbrev
)
print('Saving figure to: "{0:s}"...'.format(shear_figure_file_name))
shear_figure_object.savefig(
shear_figure_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0, bbox_inches='tight'
)
pyplot.close(shear_figure_object)
radar_figure_file_name = '{0:s}/{1:s}_radar.jpg'.format(
output_dir_name, composite_name_abbrev
)
print('Concatenating panels to: "{0:s}"...'.format(radar_figure_file_name))
imagemagick_utils.concatenate_images(
input_file_names=[refl_figure_file_name, shear_figure_file_name],
output_file_name=radar_figure_file_name,
num_panel_rows=1, num_panel_columns=2, border_width_pixels=50,
extra_args_string='-gravity south'
)
imagemagick_utils.resize_image(
input_file_name=radar_figure_file_name,
output_file_name=radar_figure_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX
)
imagemagick_utils.trim_whitespace(
input_file_name=radar_figure_file_name,
output_file_name=radar_figure_file_name,
border_width_pixels=TITLE_FONT_SIZE + 25
)
_overlay_text(
image_file_name=radar_figure_file_name,
x_offset_from_center_px=0, y_offset_from_top_px=0,
text_string=composite_name_verbose
)
imagemagick_utils.trim_whitespace(
input_file_name=radar_figure_file_name,
output_file_name=radar_figure_file_name,
border_width_pixels=10
)
return radar_figure_file_name, sounding_figure_file_name
def _run(composite_file_names, composite_names, plot_saliency, output_dir_name):
"""Makes figure with extreme examples for MYRORSS model.
This is effectively the main method.
:param composite_file_names: See documentation at top of file.
:param composite_names: Same.
:param plot_saliency: Same.
:param output_dir_name: Same.
"""
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name
)
num_composites = len(composite_file_names)
expected_dim = numpy.array([num_composites], dtype=int)
error_checking.assert_is_numpy_array(
numpy.array(composite_names), exact_dimensions=expected_dim
)
composite_names_abbrev = [
n.replace('_', '-').lower() for n in composite_names
]
composite_names_verbose = [n.replace('_', ' ') for n in composite_names]
radar_panel_file_names = [None] * num_composites
sounding_panel_file_names = [None] * num_composites
for i in range(num_composites):
radar_panel_file_names[i], sounding_panel_file_names[i] = (
_plot_composite(
composite_file_name=composite_file_names[i],
composite_name_abbrev=composite_names_abbrev[i],
composite_name_verbose=composite_names_verbose[i],
plot_saliency=plot_saliency, output_dir_name=output_dir_name
)
)
print('\n')
radar_figure_file_name = '{0:s}/radar_concat.jpg'.format(output_dir_name)
print('Concatenating panels to: "{0:s}"...'.format(radar_figure_file_name))
num_panel_rows = int(numpy.floor(
numpy.sqrt(num_composites)
))
num_panel_columns = int(numpy.ceil(
float(num_composites) / num_panel_rows
))
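    # Illustrative example of the grid computed above: with 6 composites,
    # num_panel_rows = floor(sqrt(6)) = 2 and num_panel_columns = ceil(6 / 2) = 3,
    # i.e. the panels are arranged in a 2 x 3 grid.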
imagemagick_utils.concatenate_images(
input_file_names=radar_panel_file_names,
output_file_name=radar_figure_file_name,
num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns
)
imagemagick_utils.trim_whitespace(
input_file_name=radar_figure_file_name,
output_file_name=radar_figure_file_name
)
imagemagick_utils.resize_image(
input_file_name=radar_figure_file_name,
output_file_name=radar_figure_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX
)
sounding_figure_file_name = '{0:s}/sounding_concat.jpg'.format(
output_dir_name
)
print('Concatenating panels to: "{0:s}"...'.format(
sounding_figure_file_name
))
imagemagick_utils.concatenate_images(
input_file_names=sounding_panel_file_names,
output_file_name=sounding_figure_file_name,
num_panel_rows=num_panel_rows, num_panel_columns=num_panel_columns
)
imagemagick_utils.trim_whitespace(
input_file_name=sounding_figure_file_name,
output_file_name=sounding_figure_file_name
)
imagemagick_utils.resize_image(
input_file_name=sounding_figure_file_name,
output_file_name=sounding_figure_file_name,
output_size_pixels=CONCAT_FIGURE_SIZE_PX
)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
composite_file_names=getattr(INPUT_ARG_OBJECT, INPUT_FILES_ARG_NAME),
composite_names=getattr(INPUT_ARG_OBJECT, COMPOSITE_NAMES_ARG_NAME),
plot_saliency=bool(getattr(INPUT_ARG_OBJECT, PLOT_SALIENCY_ARG_NAME)),
output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
)
| mit |
untom/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages are based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
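# A minimal sketch (an assumption, not the verbatim sklearn/__init__.py) of how
# the package __init__ can honour this flag and skip importing compiled
# extensions while the build is running:
#
#     try:
#         __SKLEARN_SETUP__
#     except NameError:
#         __SKLEARN_SETUP__ = False
#     if __SKLEARN_SETUP__:
#         sys.stderr.write('Partial import of sklearn during the build process.\n')
#     else:
#         from . import __check_build  # noqa: F401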
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # They must succeed without NumPy, for example when pip is used to
        # install scikit-learn while NumPy is not yet present on the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
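# Typical invocations of this script (illustrative): ``python setup.py install``,
# ``python setup.py develop`` for an in-place install, or ``pip install .``, all of
# which end up calling setup_package() through the __main__ guard below.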
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/io/conftest.py | 4 | 1891 | import pytest
from pandas.io.parsers import read_csv
@pytest.fixture
def tips_file(datapath):
"""Path to the tips dataset"""
return datapath('io', 'parser', 'data', 'tips.csv')
@pytest.fixture
def jsonl_file(datapath):
"""Path a JSONL dataset"""
return datapath('io', 'parser', 'data', 'items.jsonl')
@pytest.fixture
def salaries_table(datapath):
"""DataFrame with the salaries dataset"""
return read_csv(datapath('io', 'parser', 'data', 'salaries.csv'), sep='\t')
@pytest.fixture
def s3_resource(tips_file, jsonl_file):
"""Fixture for mocking S3 interaction.
The primary bucket name is "pandas-test". The following datasets
are loaded.
- tips.csv
- tips.csv.gz
- tips.csv.bz2
- items.jsonl
A private bucket "cant_get_it" is also created. The boto3 s3 resource
is yielded by the fixture.
"""
pytest.importorskip('s3fs')
boto3 = pytest.importorskip('boto3')
moto = pytest.importorskip('moto')
test_s3_files = [
('tips.csv', tips_file),
('tips.csv.gz', tips_file + '.gz'),
('tips.csv.bz2', tips_file + '.bz2'),
('items.jsonl', jsonl_file),
]
def add_tips_files(bucket_name):
for s3_key, file_name in test_s3_files:
with open(file_name, 'rb') as f:
conn.Bucket(bucket_name).put_object(
Key=s3_key,
Body=f)
try:
s3 = moto.mock_s3()
s3.start()
# see gh-16135
bucket = 'pandas-test'
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
add_tips_files(bucket)
conn.create_bucket(Bucket='cant_get_it', ACL='private')
add_tips_files('cant_get_it')
yield conn
except: # noqa: flake8
pytest.skip("failure to use s3 resource")
finally:
s3.stop()
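# A minimal usage sketch (a hypothetical test, not part of this conftest): a test
# module can request the fixture and read straight from the mocked bucket, since
# s3fs talks to the moto-backed endpoint.
#
#     def test_read_csv_from_mock_s3(s3_resource):
#         df = pandas.read_csv('s3://pandas-test/tips.csv')
#         assert len(df) > 0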
| bsd-3-clause |
rexshihaoren/scikit-learn | sklearn/tree/tree.py | 12 | 34690 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape is used here because it preserves the data contiguity;
            # indexing with [:, np.newaxis] would not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
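        # (a split that would leave fewer than min_samples_leaf samples on either
        # side can never be accepted, so requiring at least 2 * min_samples_leaf
        # samples before attempting a split avoids pointless work)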
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
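    # Illustrative note: compute_feature_importances() normalizes its result, so
    # for a fitted tree containing at least one split the importances are
    # non-negative and sum to 1.0, e.g.
    #
    #     >>> est.fit(X, y).feature_importances_.sum()   # doctest: +SKIP
    #     1.0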
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
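        # Usage sketch (illustrative, single-output case; X_train, y_train and
        # X_test are assumed to exist):
        #
        #     >>> clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
        #     >>> proba = clf.predict_proba(X_test)   # shape (n_samples, n_classes)
        #     >>> proba.sum(axis=1)                   # each row sums to 1.0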
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
azjps/bokeh | examples/app/gapminder/main.py | 3 | 2668 | # -*- coding: utf-8 -*-
import pandas as pd
from bokeh.core.properties import field
from bokeh.io import curdoc
from bokeh.layouts import layout
from bokeh.models import (
ColumnDataSource, HoverTool, SingleIntervalTicker, Slider, Button, Label,
CategoricalColorMapper,
)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()
sources = {}
region_name = regions_df.Group
region_name.name = 'region'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
df = pd.concat([fertility, life, population, region_name], axis=1)
df = df.fillna('NaN')
sources[year] = ColumnDataSource(df)
source = sources[years[0]]
plot = figure(x_range=(1, 9), y_range=(20, 100), title='Gapminder Data', plot_height=300)
plot.xaxis.ticker = SingleIntervalTicker(interval=1)
plot.xaxis.axis_label = "Children per woman (total fertility)"
plot.yaxis.ticker = SingleIntervalTicker(interval=20)
plot.yaxis.axis_label = "Life expectancy at birth (years)"
label = Label(x=1.1, y=18, text=str(years[0]), text_font_size='70pt', text_color='#eeeeee')
plot.add_layout(label)
color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
plot.circle(
x='fertility',
y='life',
size='population',
source=source,
fill_color={'field': 'region', 'transform': color_mapper},
fill_alpha=0.8,
line_color='#7c7e71',
line_width=0.5,
line_alpha=0.5,
legend=field('region'),
)
plot.add_tools(HoverTool(tooltips="@index", show_arrow=False, point_policy='follow_mouse'))
def animate_update():
year = slider.value + 1
if year > years[-1]:
year = years[0]
slider.value = year
def slider_update(attrname, old, new):
year = slider.value
label.text = str(year)
source.data = sources[year].data
slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year")
slider.on_change('value', slider_update)
def animate():
if button.label == '► Play':
button.label = '❚❚ Pause'
curdoc().add_periodic_callback(animate_update, 200)
else:
button.label = '► Play'
curdoc().remove_periodic_callback(animate_update)
button = Button(label='► Play', width=60)
button.on_click(animate)
layout = layout([
[plot],
[slider, button],
], sizing_mode='scale_width')
curdoc().add_root(layout)
curdoc().title = "Gapminder"
| bsd-3-clause |
vybstat/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix, the better, since they indicate many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be useful in
the case of class imbalance, giving a more visual interpretation
of which class is being misclassified.
Here the results are not as good as they could be, because our
choice of the regularization parameter C was not the best.
In real-life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e. by the number of samples
# in each class)
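# Each row of the normalized matrix then sums to 1, so entry (i, j) is the
# fraction of samples of true class i that were predicted as class j.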
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
bsipocz/astropy | astropy/time/tests/test_basic.py | 1 | 65037 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import functools
import datetime
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import catch_warnings, pytest
from astropy.utils.exceptions import AstropyDeprecationWarning, ErfaWarning
from astropy.utils import isiterable, iers
from astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,
TimeString, TimezoneInfo)
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy import _erfa as erfa
from astropy.table import Column
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
allclose_year = functools.partial(np.allclose, rtol=2. ** -52,
atol=0.) # 14 microsec at current epoch
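# These helpers are np.allclose with preset tolerances: rtol=2**-52 is one ULP of
# a double, and an absolute tolerance of 2**-52 day is roughly 20 picoseconds,
# hence the inline comments above.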
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
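# setup_function/teardown_function are pytest per-test hooks: they snapshot the
# Time.FORMATS registry before each test and restore it afterwards, so tests that
# register custom time formats cannot leak state into later tests.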
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+0.00037179926839122024,
-0.5+0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
        # (internal JD1 and JD2 are now with respect to TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.)/10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('format_', Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == 'tai'
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5 = t4[3]
assert t5.location == t4.location[3]
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.500000'
assert t.ut1.iso == '2006-01-15 21:24:37.834100'
assert t.tai.iso == '2006-01-15 21:25:10.500000'
assert t.tt.iso == '2006-01-15 21:25:42.684000'
assert t.tcg.iso == '2006-01-15 21:25:43.322690'
assert t.tdb.iso == '2006-01-15 21:25:42.684373'
assert t.tcb.iso == '2006-01-15 21:25:56.893952'
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5']*3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp('auto_download', False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
        dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to formats with a reference time should raise
        ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001/3600./24./365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001/3600./24./365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001/3600./24./365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
# epochTimeFormats
with pytest.raises(ScaleValueError):
t2 = t.gps
with pytest.raises(ScaleValueError):
t2 = t.unix
with pytest.raises(ScaleValueError):
t2 = t.cxcsec
with pytest.raises(ScaleValueError):
t2 = t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f'{year:04d}-{month:02d}'
yyyy_mm_dd = f'{year:04d}-{month:02d}-{day:02d}'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = f'{year:04d}-07-01'
else:
yyyy_mm_dd_plus1 = '{:04d}-01-01'.format(year+1)
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
t6 = Time(t1, scale='local')
class TestVal2:
"""Tests related to val2"""
def test_val2_ignored(self):
"""Test that val2 is ignored for string input"""
t = Time('2001:001', 'ignored', scale='utc')
assert t.yday == '2001:001:00:00:00.000'
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with catch_warnings(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with catch_warnings(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
# Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Value from:
# d = datetime.datetime(2000, 1, 1)
# matplotlib.pylab.dates.date2num(d)
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, 730120.0, atol=1e-5, rtol=0)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with catch_warnings() as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert 'bad day (JD computed)' in str(w[0].message)
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
assert t.location.x == t_loc_x # prove that it did not change
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd', scale='tai')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd', scale='tai')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd', scale='tai')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1./24./3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion():
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10*u.hour, tzname='US/Hawaii')
# The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r'does not support leap seconds'):
Time('2015-06-30 23:59:60.000').to_datetime()
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[[f'{y:04d}-{m:02d}-{d:02d}' for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is not writeable
t = Time('2000:001', scale='utc')
with pytest.raises(ValueError) as err:
t[()] = '2000:002'
assert 'scalar Time object is read-only.' in str(err.value)
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err.value)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err.value)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err.value)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot', scale='tai')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S',
scale='tai')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err.value)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format='cxcsec', location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format='cxcsec', location=loc)
t2 = Time(1, format='cxcsec')
assert hash(t) != hash(t2)
t = Time('2000:180', scale='utc')
t2 = Time(t, scale='tai')
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format='sec')
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
| bsd-3-clause |
srinathv/vispy | vispy/testing/_testing.py | 12 | 12441 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from __future__ import print_function
import numpy as np
import sys
import os
import inspect
from distutils.version import LooseVersion
from ..ext.six import string_types
from ..util import use_log_level
###############################################################################
# Adapted from Python's unittest2
# http://docs.python.org/2/license.html
try:
from unittest.case import SkipTest
except ImportError:
try:
from unittest2.case import SkipTest
except ImportError:
class SkipTest(Exception):
pass
def _safe_rep(obj, short=False):
"""Helper for assert_* ports"""
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < 80:
return result
return result[:80] + ' [truncated]...'
def _safe_str(obj):
"""Helper for assert_* ports"""
try:
return str(obj)
except Exception:
return object.__str__(obj)
def _format_msg(msg, std_msg):
"""Helper for assert_* ports"""
if msg is None:
msg = std_msg
else:
try:
msg = '%s : %s' % (std_msg, msg)
except UnicodeDecodeError:
msg = '%s : %s' % (_safe_str(std_msg), _safe_str(msg))
return msg
def nottest(func):
"""Decorator to mark a function or method as *not* a test
"""
func.__test__ = False
return func
def assert_raises(exp, func, *args, **kwargs):
"""Backport"""
try:
func(*args, **kwargs)
except exp:
return
std_msg = '%s not raised' % (_safe_rep(exp))
raise AssertionError(_format_msg(None, std_msg))
def assert_in(member, container, msg=None):
"""Backport"""
if member in container:
return
std_msg = '%s not found in %s' % (_safe_rep(member), _safe_rep(container))
raise AssertionError(_format_msg(msg, std_msg))
def assert_true(x, msg=None):
"""Backport"""
if x:
return
std_msg = '%s is not True' % (_safe_rep(x),)
raise AssertionError(_format_msg(msg, std_msg))
def assert_equal(x, y, msg=None):
"""Backport"""
if x == y:
return
std_msg = '%s not equal to %s' % (_safe_rep(x), _safe_rep(y))
raise AssertionError(_format_msg(msg, std_msg))
def assert_not_equal(x, y, msg=None):
"""Backport"""
if x != y:
return
std_msg = '%s equal to %s' % (_safe_rep(x), _safe_rep(y))
raise AssertionError(_format_msg(msg, std_msg))
def assert_not_in(member, container, msg=None):
"""Backport"""
if member not in container:
return
std_msg = '%s found in %s' % (_safe_rep(member), _safe_rep(container))
raise AssertionError(_format_msg(msg, std_msg))
def assert_is(expr1, expr2, msg=None):
"""Backport"""
if expr1 is not expr2:
std_msg = '%s is not %s' % (_safe_rep(expr1), _safe_rep(expr2))
raise AssertionError(_format_msg(msg, std_msg))
class raises(object):
"""Helper class to test exception raising"""
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, exc_typ, exc, tb):
if isinstance(exc, self.exc):
return True
elif exc is None:
raise AssertionError("Expected %s (no exception raised)" %
self.exc.__name__)
else:
raise AssertionError("Expected %s, got %s instead (%s)" %
(self.exc.__name__, type(exc).__name__, exc))
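# Illustrative usage sketch (added for clarity; not part of the original
# module). The `raises` helper above mirrors pytest.raises as a context
# manager; the block below passes because ValueError is raised and swallowed.
def _example_raises_usage():
    with raises(ValueError):
        int('not a number')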
###############################################################################
# GL stuff
def has_pyopengl():
try:
from OpenGL import GL # noqa, analysis:ignore
except Exception:
return False
else:
return True
def requires_pyopengl():
return np.testing.dec.skipif(not has_pyopengl(), 'Requires PyOpenGL')
###############################################################################
# App stuff
def has_backend(backend, has=(), capable=(), out=()):
from ..app.backends import BACKENDMAP
using = os.getenv('_VISPY_TESTING_APP', None)
if using is not None and using != backend:
# e.g., we are on a 'pyglet' run but the test requires PyQt4
ret = (False,) if len(out) > 0 else False
for o in out:
ret += (None,)
return ret
# let's follow the standard code path
module_name = BACKENDMAP[backend.lower()][1]
with use_log_level('warning', print_msg=False):
mod = __import__('app.backends.%s' % module_name, globals(), level=2)
mod = getattr(mod.backends, module_name)
good = mod.testable
for h in has:
good = (good and getattr(mod, 'has_%s' % h))
for cap in capable:
good = (good and mod.capability[cap])
ret = (good,) if len(out) > 0 else good
for o in out:
ret += (getattr(mod, o),)
return ret
def has_application(backend=None, has=(), capable=()):
"""Determine if a suitable app backend exists"""
from ..app.backends import BACKEND_NAMES
# avoid importing other backends if we don't need to
if backend is None:
for backend in BACKEND_NAMES:
if has_backend(backend, has=has, capable=capable):
good = True
msg = backend
break
else:
good = False
msg = 'Requires application backend'
else:
good, why = has_backend(backend, has=has, capable=capable,
out=['why_not'])
if not good:
msg = 'Requires %s: %s' % (backend, why)
else:
msg = backend
return good, msg
def composed(*decs):
def deco(f):
for dec in reversed(decs):
f = dec(f)
return f
return deco
def requires_application(backend=None, has=(), capable=()):
"""Return a decorator for tests that require an application"""
good, msg = has_application(backend, has, capable)
dec_backend = np.testing.dec.skipif(not good, "Skipping test: %s" % msg)
try:
import pytest
except Exception:
return dec_backend
dec_app = pytest.mark.vispy_app_test
return composed(dec_app, dec_backend)
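# Illustrative usage sketch (added for clarity; not part of the original
# module). The decorator returned above is applied to app-dependent tests,
# for example:
#
#     @requires_application()
#     def test_something_needing_a_canvas():
#         ...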
def requires_img_lib():
"""Decorator for tests that require an image library"""
from ..io import _check_img_lib
if sys.platform.startswith('win'):
has_img_lib = False # PIL breaks tests on windows (!)
else:
has_img_lib = not all(c is None for c in _check_img_lib())
return np.testing.dec.skipif(not has_img_lib, 'imageio or PIL required')
def has_ipython(version='3.0'):
"""function that checks the presence of IPython"""
# typecast version to a string, in case an integer is given
version = str(version)
try:
import IPython # noqa
except Exception:
return False, "IPython library not found"
else:
if LooseVersion(IPython.__version__) >= LooseVersion(version):
return True, "IPython present"
else:
message = (
"current IPython version: (%s) is "
"older than expected version: (%s)") % \
(IPython.__version__, version)
return False, message
def requires_ipython(version='3.0'):
ipython_present, message = has_ipython(version)
return np.testing.dec.skipif(not ipython_present, message)
def has_matplotlib(version='1.2'):
"""Determine if mpl is a usable version"""
try:
import matplotlib
except Exception:
has_mpl = False
else:
if LooseVersion(matplotlib.__version__) >= LooseVersion(version):
has_mpl = True
else:
has_mpl = False
return has_mpl
###############################################################################
# Visuals stuff
def _has_scipy(min_version):
try:
assert isinstance(min_version, string_types)
import scipy # noqa, analysis:ignore
from distutils.version import LooseVersion
this_version = LooseVersion(scipy.__version__)
if this_version < min_version:
return False
except Exception:
return False
else:
return True
def requires_scipy(min_version='0.13'):
return np.testing.dec.skipif(not _has_scipy(min_version),
'Requires Scipy version >= %s' % min_version)
@nottest
def TestingCanvas(bgcolor='black', size=(100, 100), dpi=None, decorate=False,
**kwargs):
"""Class wrapper to avoid importing scene until necessary"""
# On Windows decorations can force windows to be an incorrect size
# (e.g., instead of 100x100 they will be 100x248), having no
# decorations works around this
from ..scene import SceneCanvas
class TestingCanvas(SceneCanvas):
def __init__(self, bgcolor, size, dpi, decorate, **kwargs):
self._entered = False
self._wanted_vp = None
SceneCanvas.__init__(self, bgcolor=bgcolor, size=size,
dpi=dpi, decorate=decorate,
**kwargs)
def __enter__(self):
SceneCanvas.__enter__(self)
# sometimes our window can be larger than our requested draw
# area (e.g. on Windows), and this messes up our tests that
# typically use very small windows. Here we "fix" it.
scale = np.array(self.physical_size) / np.array(self.size, float)
scale = int(np.round(np.mean(scale)))
self._wanted_vp = 0, 0, size[0] * scale, size[1] * scale
self.context.set_state(clear_color=self._bgcolor)
self.context.set_viewport(*self._wanted_vp)
self._entered = True
return self
def draw_visual(self, visual, event=None):
if not self._entered:
return
SceneCanvas.draw_visual(self, visual, event)
self.context.finish()
return TestingCanvas(bgcolor, size, dpi, decorate, **kwargs)
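# Illustrative usage sketch (added for clarity; not part of the original
# module). The factory above is typically used as a context manager, e.g.
#
#     with TestingCanvas(bgcolor='white', size=(50, 50)) as c:
#         ...  # draw visuals and inspect pixels here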
@nottest
def save_testing_image(image, location):
from ..gloo.util import _screenshot
from ..util import make_png
if image == "screenshot":
image = _screenshot(alpha=False)
with open(location+'.png', 'wb') as fid:
fid.write(make_png(image))
@nottest
def run_tests_if_main():
"""Run tests in a given file if it is run as a script"""
local_vars = inspect.currentframe().f_back.f_locals
if not local_vars.get('__name__', '') == '__main__':
return
# we are in a "__main__"
fname = local_vars['__file__']
# Run ourselves. post-mortem debugging!
try:
import faulthandler
faulthandler.enable()
except Exception:
pass
import __main__
try:
import pytest
pytest.main(['-s', '--tb=short', fname])
except ImportError:
print('==== Running tests in script\n==== %s' % fname)
run_tests_in_object(__main__)
print('==== Tests pass')
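# Illustrative usage sketch (added for clarity; not part of the original
# module). A test module simply ends with a bare call, which is a no-op
# unless the file is executed directly as a script:
#
#     run_tests_if_main()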
def run_tests_in_object(ob):
# Setup
for name in dir(ob):
if name.lower().startswith('setup'):
print('Calling %s' % name)
getattr(ob, name)()
# Exec
for name in sorted(dir(ob), key=lambda x: x.lower()): # consistent order
val = getattr(ob, name)
if name.startswith('_'):
continue
elif callable(val) and (name[:4] == 'test' or name[-4:] == 'test'):
print('Running test-func %s ... ' % name, end='')
try:
val()
print('ok')
except Exception as err:
if 'skiptest' in err.__class__.__name__.lower():
print('skip')
else:
raise
elif isinstance(val, type) and 'Test' in name:
print('== Running test-class %s' % name)
run_tests_in_object(val())
print('== Done with test-class %s' % name)
# Teardown
for name in dir(ob):
if name.lower().startswith('teardown'):
print('Calling %s' % name)
getattr(ob, name)()
| bsd-3-clause |
q1ang/vnpy | vn.datayes/api.py | 19 | 45371 | #encoding: UTF-8
import os
import json
import time
import requests
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread, Timer
from pymongo import MongoClient
from requests.exceptions import ConnectionError
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config contains all kinds of settings and user info that
could be useful in the implementation of the Api wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config.
- domain: string, api domain.
- ssl: boolean, specifies http or https usage.
- version: string, version of the api. Currently 'v1'.
- header: dictionary; the request header which contains
authorization information.
"""
head = 'my config'
toke_ = '44ebc0f058981f85382595f9f15f967' + \
'0c7eaf2695de30dd752e8f33e9022baa0'
token = '7c2e59e212dbff90ffd6b382c7afb57' + \
'bc987a99307d382b058af6748f591d723'
body = {
'ssl': False,
'domain': 'api.wmcloud.com/data',
'version': 'v1',
'header': {
'Connection' : 'keep-alive',
'Authorization': 'Bearer ' + token
}
}
def __init__(self, head=None, token=None, body=None):
"""
Reloaded constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
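# Illustrative usage sketch (added for clarity; not part of the original
# module). It shows how the fields described in the Config docstring can be
# overridden; the token below is a placeholder, not a real credential.
def _example_custom_config():
    my_token = 'REPLACE_WITH_YOUR_DATAYES_TOKEN'  # hypothetical value
    body = {
        'ssl': False,
        'domain': 'api.wmcloud.com/data',
        'version': 'v1',
        'header': {
            'Connection': 'keep-alive',
            'Authorization': 'Bearer ' + my_token
        }
    }
    return Config(head='my config', token=my_token, body=body)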
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container. The fundamental of all other data
container objects defined within this module.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- History:
- Bar
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class History(BaseDataContainer):
"""
Historical data container. The foundation of all other pandas
DataFrame-like two dimensional data containers for this module.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ...],
'retCode': 1,
'retMsg': 'Success'}.
So the body of data is actually in data['data'], which is
our target when constructing the container.
"""
try:
assert 'data' in data
self.body = pd.DataFrame(data['data'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + "response has no 'data' field."
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
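# Illustrative usage sketch (added for clarity; not part of the original
# module). The payload below is hand-made, mimicking the layout described in
# the History docstring above.
def _example_history_from_json():
    sample_resp = {
        'retCode': 1,
        'retMsg': 'Success',
        'data': [
            {'date': 20150701, 'closePrice': 15.88},
            {'date': 20150702, 'closePrice': 15.99},
        ]
    }
    return History(sample_resp).body  # pandas DataFrame, one row per record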
class Bar(History):
"""
Historical Bar data container. Inherits from History()
DataFrame-like two dimensional data containers for Bar data.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY_BAR'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [{
'exchangeCD': 'XSHG',
'utcOffset': '+08:00',
'unit': 1,
'currencyCD': 'CNY',
'barBodys': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ... ],
'ticker': '000001',
'shortNM': u'\u4e0a\u8bc1\u6307\u6570'
}, ...(other tickers) ],
'retCode': 1,
'retMsg': 'Success'}.
When requesting 1 ticker, the json['data'] layer has only one element;
the layer exists to allow data collection for multiple tickers,
which is not currently supported.
So we want resp.json()['data'][0]['barBodys'] for Bar data contents,
and that is what we go into when constructing Bar.
"""
try:
assert 'data' in data
assert 'barBodys' in data['data'][0]
self.body = pd.DataFrame(data['data'][0]['barBodys'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + "response has no 'data' or 'barBodys' field."
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
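# Illustrative usage sketch (added for clarity; not part of the original
# module). The payload below is hand-made; note the extra 'barBodys' layer
# that Bar expects, as described in its docstring (field names such as
# 'barTime' are hypothetical).
def _example_bar_from_json():
    sample_resp = {
        'retCode': 1,
        'retMsg': 'Success',
        'data': [{
            'ticker': '000001',
            'exchangeCD': 'XSHG',
            'barBodys': [
                {'barTime': '09:31', 'closePrice': 15.88},
                {'barTime': '09:32', 'closePrice': 15.99},
            ],
        }],
    }
    return Bar(sample_resp).body  # pandas DataFrame of minute bars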
#----------------------------------------------------------------------
# Datayes Api class
class PyApi(object):
"""
Python based Datayes Api object.
PyApi should be initialized with a Config json. The config must be complete,
in that once constructed, the private variables like request headers,
tokens, etc. become constant values (inherited from config), and will be
consistently referred to whenever requests are made.
privates
--------
* _config: Config object; a container of all useful settings when making
requests.
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session object.
examples
--------
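A hypothetical sketch (network access and a valid token are needed to
actually fetch data; the security ID below is only an example):

    config = Config()          # default demo token defined above
    api = PyApi(config)
    bar = api.get_equity_M1_one(start='09:30', end='10:00',
                                secID='000001.XSHG')
    print bar.body.head()      # pandas DataFrame of 1-minute bars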
"""
_config = Config()
# request stuffs
_ssl = False
_domain = ''
_version = 'v1'
_header = dict()
_token = None
_session = requests.session()
def __init__(self, config):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
"""
if config.body:
try:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._version = config.body['version']
self._header = config.body['header']
except KeyError:
msg = '[API]: Unable to configure api; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[API]: Unable to configure api; ' + str(e)
raise VNPAST_ConfigError(msg)
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
else:
self._domain = 'http://' + self._domain
def __access(self, url, params, method='GET'):
"""
request specific data from given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
except AssertionError,e:
raise VNPAST_RequestError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
assert resp.status_code == 201
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise VNPAST_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise VNPAST_RequestError(msg)
#----------------------------------------------------------------------
# directly get methods - Market data
def get_equity_M1_one(self,
start='', end='', secID='000001.XSHG'):
"""
Get 1-minute intraday bar data of one security.
parameters
----------
* start, end: string; Time mark formatted in 'HH:MM'. Specifies the
start/end point of bar. Note that the requested date is the
latest trading day (only one day), and the default start/end time is
'09:30' and min(now, '15:00'). Effective minute bars range from
09:30 - 11:30 in the morning and 13:01 - 15:00 in the afternoon.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
"""
url = '{}/{}/api/market/getBarRTIntraDay.json'.format(
self._domain, self._version)
params = {
'startTime': start,
'endTime': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
print resp.json()
data = Bar(resp.json())
return data
except AssertionError: return 0
def get_equity_M1(self, field='', start='20130701', end='20130730',
secID='000001.XSHG', output='df'):
"""
1-minute bar in a month, currently unavailable.
parameters
----------
* field: string; variables that are to be requested.
* start, end: string; Time mark formatted in 'YYYYMMDD'.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
url = '{}/{}/api/market/getBarHistDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'startDate': start,
'endDate': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = Bar(resp.json())
elif output == 'list':
data = resp.json()['data'][0]['barBodys']
return data
except AssertionError: return 0
def get_equity_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one security.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for securities)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- actPreClosePrice* double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- dealAmount* integer.
- turnoverRate double.
- accumAdjFactor* double.
- negMarketValue* double.
- marketValue* double.
- PE* double.
- PE1* double.
- PB* double.
Field is an optional parameter, default setting returns all fields.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of bar. Start and end are optional parameters. If
start, end and ticker are all specified, default 'one' value will be
abandoned.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange.
* ticker: string; the trading code in the form of '000001'.
* one: string; Date mark formatted in 'YYYYMMDD'.
Specifies one date on which data of all tickers are to be requested.
Note that to get effective json data response, at least one parameter
in {secID, ticker, tradeDate} should be entered.
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
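- example (illustrative only, not part of the original docstring):
>>> hist = api.get_equity_D1(ticker='000001',
... start='20150101', end='20150301') # doctest: +SKIP
>>> hist.body.head() # ret.body is a pandas DataFrame of daily bars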
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktEqud.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
#return resp
except AssertionError: return 0
def get_block_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_repo_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_bond_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one bond instrument.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for bonds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- turnoverRate double.
- dealAmount* integer.
- accrInterest* double.
- YTM(yieldToMaturity)* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktBondd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one future contract.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for future contracts)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- contractObject* string.
- contractMark* string.
- preSettlePrice* double.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol integer.
- turnoverValue integer.
- openInt* integer.
- CHG* double.
- CHG1* double.
- CHGPct* double.
- mainCon* integer (0/1 flag).
- smainCon* integer (0/1 flag).
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFutd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_main_D1(self, field='', start='', end='', mark='',
obj='', main=1, one=20150513):
"""
"""
pass
def get_fund_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one mutual fund.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for funds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
- discount* double.
- discountRatio* double.
- circulationShares* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFundd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_index_D1(self, field='', start='', end='', indexID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one stock index.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for indices)
- indexID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- porgFullName* string.
- exchangeCD string.
- preCloseIndex double.
- openIndex double.
- highestIndex double.
- lowestIndex double.
- closeIndex double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktIdxd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'indexID': indexID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_option_D1(self, field='', start='', end='', secID='',
optID='' ,ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one option contact.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for options)
- secID string.
- optID* string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol double.
- turnoverValue double.
- openInt* integer.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktOptd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'optID': optID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_stockFactor_D1(self, field='', secID='',
ticker='000001', start=20130701, end=20130801):
"""
Get 1-day interday factor data for stocks.
parameters
----------
* field: string; variables that are to be requested.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
url = '{}/{}/api/market/getStockFactorsDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
#----------------------------------------------------------------------
# directly get methods - Fundamental Data
def get_balanceSheet(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtBS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_balanceSheet_bnk(self):
"""
"""
pass
def get_balanceSheet_sec(self):
"""
"""
pass
def get_balanceSheet_ins(self):
"""
"""
pass
def get_balanceSheet_ind(self):
"""
"""
pass
def get_cashFlow(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtCF.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_cashFlow_bnk(self):
"""
"""
pass
def get_cashFlow_sec(self):
"""
"""
pass
def get_cashFlow_ins(self):
"""
"""
pass
def get_cashFlow_ind(self):
"""
"""
pass
def get_incomeStatement(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtIS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_incomeStatement_bnk(self):
"""
"""
pass
def get_incomeStatement_sec(self):
"""
"""
pass
def get_incomeStatement_ins(self):
"""
"""
pass
def get_incomeStatement_ind(self):
"""
"""
pass
#----------------------------------------------------------------------
# multi-threading download for database storage.
def __drudgery(self, id, db, indexType,
start, end, tasks, target):
"""
Basic drudgery function.
This method loops over a list of tasks (tickers) and gets data using
the target api.get_# method for all those tickers.
A new feature 'date' or 'dateTime' (for intraday) will be automatically
added into every json-like document; it holds the datetime.datetime()
formatted date(time) mark. With the MongoDB setting used in this module,
this feature should be the unique index for all collections.
By programmatically creating and assigning tasks to drudgery functions,
multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* indexType: string(enum); 'date' or 'datetime', specifies how
the collection index is formatted.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
* target: method; the api.get_# method that is to be called by
drudgery function.
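- example (illustrative sketch only; 'mongo_db' is a placeholder
pymongo database handle, and the mangled name _PyApi__drudgery is how
this private method would be reached from outside the class):
>>> api._PyApi__drudgery(id=1, db=mongo_db, indexType='date',
... start='20150101', end='20150301',
... tasks=['000001', '000002'],
... target=api.get_equity_D1) # doctest: +SKIP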
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
if indexType == 'date':
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
elif indexType == 'datetime':
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
else:
raise ValueError
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = target(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_equity_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_equity_D1)
def get_future_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_future_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_future_D1)
def get_index_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_index_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_index_D1)
def get_bond_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_bond_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_bond_D1)
def get_fund_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_fund_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_fund_D1)
def get_option_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_option_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_option_D1)
#----------------------------------------------------------------------
def __overlord(self, db, start, end, dName,
target1, target2, sessionNum):
"""
Basic controller of multithreading request.
Generates a list of all tickers, creates threads and distribute
tasks to individual #_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* dName: string; the path of the file where all tickers' information
is stored.
* target1: method; the target api method that overlord calls
to get the task list.
* target2: method; the corresponding drudgery function.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
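- example (illustrative): with 3000 tickers and sessionNum=30,
chunkSize = 3000/30 = 100, so 30 task lists of 100 tickers each are
dispatched, one thread per list.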
"""
if os.path.isfile(dName):
# if the ticker list file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = target1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = target2,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
def get_equity_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get equity D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/equTicker.json',
target1 = self.get_equity_D1,
target2 = self.get_equity_D1_drudgery,
sessionNum = sessionNum)
def get_future_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get future D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/futTicker.json',
target1 = self.get_future_D1,
target2 = self.get_future_D1_drudgery,
sessionNum = sessionNum)
def get_index_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get index D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/idxTicker.json',
target1 = self.get_index_D1,
target2 = self.get_index_D1_drudgery,
sessionNum = sessionNum)
def get_bond_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get bond D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/bndTicker.json',
target1 = self.get_bond_D1,
target2 = self.get_bond_D1_drudgery,
sessionNum = sessionNum)
def get_fund_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get fund D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/fudTicker.json',
target1 = self.get_fund_D1,
target2 = self.get_fund_D1_drudgery,
sessionNum = sessionNum)
def get_option_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get option D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/optTicker.json',
target1 = self.get_option_D1,
target2 = self.get_option_D1_drudgery,
sessionNum = sessionNum)
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distribute
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if the ticker list file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
# to be deprecated
def get_equity_D1_drudgery_(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_D1 bars.
This method loops over a list of tasks (tickers) and gets D1 bars
for all these tickers. A new feature 'date' will be automatically
added into every json-like document; it holds the datetime.datetime()
formatted date mark. With the default setting of MongoDB in this
module, this feature should be the unique index for all collections.
By programmatically creating and assigning tasks to drudgery functions,
multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = self.get_equity_D1(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
# If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distribute
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if the ticker list file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
def get_equity_M1_drudgery(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_M1 bars.
This method loops over a list of tasks (secIDs) and gets 1-minute bars
for all these securities. A new feature 'dateTime', combining a Y-m-d
formatted date part and an H:M time part, will be automatically added
into every json-like document. It is a datetime.datetime() timestamp
object. In this module, this feature should be the unique index for all
collections.
By programmatically creating and assigning tasks to drudgery functions,
multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars. Note that to ensure the
success of every requests, the range amid start and end had better be
no more than one month.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
k, n = 1, len(tasks)
for secID in tasks:
try:
data = self.get_equity_M1(start = start,
end = end,
secID = secID,
output = 'list')
map(update_dt, data) # add datetime feature to docs.
coll = db[secID]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
# If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_M1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_M1_interMonth(self, db, id,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
tasks=[]):
"""
Mid-level wrapper of get equity M1 method.
Get 1-minute bar between specified start year and ending year for
more than one tickers in tasks list.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* id: integer; the ID of wrapper session.
* startYr, endYr: integer; the start and ending year amid which the
1-minute bar data is gotten one month by another employing
get_equity_M1_drudgery() function.
Default values are this year and two years before now.
the complete time range will be sub-divided into months. And threads
are deployed for each of these months.
- example
-------
Suppose .now() is August 15th 2015. (20150815)
startYr, endYr = 2014, 2015.
then two list of strings will be generated:
ymdStringStart = ['20140102','20140202', ... '20150802']
ymdStringEnd = ['20140101','20140201', ... '20150801']
the sub-timeRanges passed to drudgeries will be:
(start, end): (20140102, 20140201), (20140202, 20140301),
..., (20150702, 20150801).
So the actual time range is 20140102 - 20150801.
Note that unlike the *_mongod controllers, this wrapper takes no
sessionNum argument; one drudgery thread is deployed per monthly
sub-range of the requested time span.
"""
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'02' for k in range(1,13)]
ymdStringStart = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStringEnd = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
k = 0
for t in range(len(ymdStringEnd)-1):
start = ymdStringStart[t]
end = ymdStringEnd[t+1]
subID = str(id) + '_' + str(k)
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (subID, db, start, end, tasks))
thrd.start()
k += 1
def get_equity_M1_all(self, db,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
splitNum=10):
"""
"""
"""
# initialize task list.
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
chunkSize = len(allSecIds)/splitNum
taskLists = [allSecIds[k:k+chunkSize] for k in range(
0, len(allSecIds), chunkSize)]
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStrings = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
print taskLists[0]
print ymdStrings
k = 0
for t in range(len(ymdStrings)-1):
start = ymdStrings[t]
end = ymdStrings[t+1]
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (k, db, start, end, taskLists[0]))
thrd.start()
k += 1
return 1
"""
pass
| mit |
wjlei1990/pypaw | scripts/stats_window/stats_window.py | 1 | 3807 | from __future__ import print_function, division
import os
import json
import numpy as np
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def read_txt_into_list(txtfile):
with open(txtfile, 'r') as f:
content = f.readlines()
eventlist = [line.rstrip() for line in content]
return eventlist
def extract_window_info(window_file):
event_info = {}
with open(window_file) as fh:
windows = json.load(fh)
for sta, sta_info in windows.iteritems():
for chan, chan_info in sta_info.iteritems():
comp = chan.split(".")[-1]
if comp not in event_info:
event_info[comp] = {"cc_shift": [], "dlnA": []}
event_info[comp]["cc_shift"].extend(
[_win["cc_shift_in_seconds"] for _win in chan_info])
event_info[comp]["dlnA"].extend(
[_win["dlnA"] for _win in chan_info])
return event_info
def add_event_to_period(event_wininfo, period_wininfo):
for comp, comp_info in event_wininfo.iteritems():
if comp not in period_wininfo:
period_wininfo[comp] = {"cc_shift": [], "dlnA": []}
period_wininfo[comp]["cc_shift"] += comp_info["cc_shift"]
period_wininfo[comp]["dlnA"] += comp_info["dlnA"]
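# Note (added for clarity; inferred from gather_windows/extract_window_info
# below): the '-f' JSON passed to this script is assumed to look like
# {"input": {"17_40": {"<event>": "<path/to/window.json>", ...}, ...},
# "outputdir": "<path>"},
# where each window file maps station -> channel -> list of window dicts
# containing "cc_shift_in_seconds" and "dlnA".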
def gather_windows(path):
results = {}
with open(path) as fh:
content = json.load(fh)
results = {}
for period, period_info in content["input"].iteritems():
print("Gather on period: %s" % period)
if len(period_info) == 0:
continue
period_wininfo = {}
for event, event_info in period_info.iteritems():
event_wininfo = extract_window_info(event_info)
print("event wininfo:", event_wininfo.keys())
add_event_to_period(event_wininfo, period_wininfo)
results[period] = period_wininfo
return results, content["outputdir"]
def _stats_(windows, outputfn):
stats_var = {}
keys = ["cc_shift", "dlnA"]
for period, period_info in windows.iteritems():
stats_var[period] = {}
for comp, comp_info in period_info.iteritems():
stats_var[period][comp] = {}
for key in keys:
array = np.array(comp_info[key])
stats_var[period][comp][key] = \
{"mean": np.mean(array), "counts": len(array)}
print("output file: %s" % outputfn)
with open(outputfn, 'w') as fh:
json.dump(stats_var, fh, indent=2, sort_keys=True)
def plot_results(windows, outputdir):
keys = ["cc_shift", "dlnA"]
#ps = ["17_40", "40_100", "90_250"]
#ps = ["17_40", "40_100", "90_150", "90_250"]
#ps = ["90_250", ]
ps = ["90_150", ]
cs = ["BHZ", "BHR", "BHT"]
figsize = (8*len(cs), 8*len(ps))
for key in keys:
plt.figure(figsize=figsize, facecolor="w", edgecolor="k")
g = gridspec.GridSpec(len(ps), len(cs))
for ip, p in enumerate(ps):
for ic, c in enumerate(cs):
array = np.array(windows[p][c][key])
plt.subplot(g[ip, ic])
plt.hist(array, 20, alpha=0.75)
plt.xlabel("%s_%s" % (p, c))
plt.tight_layout()
plt.savefig(os.path.join(outputdir, "%s.stats.png" % key))
plt.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-f', action='store', dest='path',
required=True)
args = parser.parse_args()
results, outputdir = gather_windows(args.path)
if not os.path.exists(outputdir):
os.makedirs(outputdir)
fn = os.path.join(outputdir, "windows.stats_val.json")
_stats_(results, outputfn=fn)
plot_results(results, outputdir)
| lgpl-3.0 |
wbengine/SPMILM | egs/ptb_wsj0/wer.py | 1 | 3755 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.insert(0, os.getcwd() + '/../../tools/')
import wb
import trf
import rnn
import lstm
# revise this function to config the dataset used to train different model
def data():
# ptb + wsj93 experiments
return data_ptb() + data_wsj92nbest()
def data_verfy(paths):
for w in paths:
if not os.path.isfile(w):
print('[ERROR] no such file: ' + w)
return paths
def data_ptb():
root = './data/ptb/'
train = root + 'ptb.train.txt'
valid = root + 'ptb.valid.txt'
test = root + 'ptb.test.txt'
return data_verfy([train, valid, test])
def data_wsj92nbest():
root = './data/WSJ92-test-data/'
nbest = root + '1000best.sent'
trans = root + 'transcript.txt'
ac = root + '1000best.acscore'
lm = root + '1000best.lmscore'
return data_verfy([nbest, trans, ac, lm])
def wer_all(lmpaths, lmtypes, outlog):
fres = wb.FRes(outlog, True)
fres.Clean()
[read_nbest, read_trans, read_acscore] = data()[3:6]
lmscale_vec = np.linspace(0.1, 0.9, 9)
weight_vec = np.linspace(0.5, 0.5, 1)
for type in lmtypes:
exist_multiple_run = False
a = type.split('+')
for lm in a:
if lmpaths[lm].find('<run>') != -1:
exist_multiple_run = True
break
run_vec = [0]
run_name = type
if exist_multiple_run:
run_vec = range(0, 10)
run_name = type + ':<run>'
for run in run_vec:
run_str = 'run{}'.format(run)
name = run_name.replace('<run>', run_str)
opt_wer_vec = [100, 1.0, 1.0]
opt_weight = 1.0
if len(a) == 1:
lmscore = wb.LoadScore(lmpaths[a[0]].replace('<run>', run_str))
opt_wer_vec = wb.TuneWER(read_nbest, read_trans,
lmscore, read_acscore, lmscale_vec)
opt_weight = 1.0
else:
lmscore1 = np.array(wb.LoadScore(lmpaths[a[0]].replace('<run>', run_str)))
lmscore2 = np.array(wb.LoadScore(lmpaths[a[1]].replace('<run>', run_str)))
for w in weight_vec:
lmscore = w*lmscore1 + (1-w)*lmscore2
[wer, lmscale, acscale] = wb.TuneWER(read_nbest, read_trans,
lmscore, read_acscore, lmscale_vec)
if wer < opt_wer_vec[0]:
opt_wer_vec = [wer, lmscale, acscale]
opt_weight = w
fres.Add(name, ['wer', 'lmscale', 'acscale', 'weight'], opt_wer_vec + [opt_weight])
if __name__ == '__main__':
lmpaths = {'KN5': 'ngramlm/5gram.lmscore',
'RNN': 'rnnlm/h250_c1_bptt5.run0.lmscore',
'LSTM':'lstmlm/h250_dropout0_epoch10.run0.lmscore',
'TRF': 'trflm/trf_c200_g4_w_c_ws_cs_wsh_csh_tied.<run>.lmscore'}
lmtypes = ['KN5', 'RNN', 'LSTM', 'TRF', 'RNN+KN5', 'RNN+TRF', 'LSTM+KN5', 'LSTM+TRF']
outlog = 'wer.log'
if not os.path.exists(outlog):
wer_all(lmpaths, lmtypes, outlog)
fres = wb.FRes(outlog, True)
lmwers = dict()
with open(outlog, 'rt') as f:
f.readline()
for a in [line.split() for line in f]:
if a[0].find('[all]') != -1:
break
type = a[0].split(':')[0]
wer_vec = lmwers.setdefault(type, [])
wer_vec.append(float(a[1]))
for type in lmtypes:
wer_vec = lmwers[type]
wer_mean = np.mean(wer_vec)
wer_std = np.std(wer_vec)
fres.Add(type + '[all]', ['wer'], ['{:.3f}+{:.3f}'.format(wer_mean, wer_std)])
| apache-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/ensemble/tests/test_voting_classifier.py | 21 | 10259 | """Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
def test_parallel_predict():
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=1).fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=2).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight():
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = SVC(probability=True, random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
eclf3.fit(X, y, sample_weight)
clf1.fit(X, y, sample_weight)
assert_array_equal(eclf3.predict(X), clf1.predict(X))
assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('svc', clf3), ('knn', clf4)],
voting='soft')
msg = ('Underlying estimator \'knn\' does not support sample weights.')
assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
| bsd-3-clause |
yunque/librosa | tests/test_failures.py | 3 | 2797 | #!/usr/bin/env python
# CREATED:2014-12-29 10:52:23 by Brian McFee <[email protected]>
# unit tests for ill-formed inputs
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except:
pass
import matplotlib
matplotlib.use('Agg')
import numpy as np
import librosa
from nose.tools import raises
@raises(librosa.ParameterError)
def test_mono_valid_stereo():
'''valid_audio: mono=True, y.ndim==2'''
y = np.zeros((2, 1000))
librosa.util.valid_audio(y, mono=True)
def test_valid_stereo_or_mono():
'''valid_audio: mono=False, y.ndim==1'''
y = np.zeros(1000)
librosa.util.valid_audio(y, mono=False)
def test_valid_mono():
'''valid_audio: mono=True, y.ndim==1'''
y = np.zeros(1000)
librosa.util.valid_audio(y, mono=True)
def test_valid_stereo():
'''valid_audio: mono=False, y.ndim==2'''
y = np.zeros((2, 1000))
librosa.util.valid_audio(y, mono=False)
@raises(librosa.ParameterError)
def test_valid_audio_type():
'''valid_audio: list input'''
y = list(np.zeros(1000))
librosa.util.valid_audio(y)
@raises(librosa.ParameterError)
def test_valid_audio_nan():
'''valid_audio: NaN'''
y = np.zeros(1000)
y[10] = np.NaN
librosa.util.valid_audio(y)
@raises(librosa.ParameterError)
def test_valid_audio_inf():
'''valid_audio: Inf'''
y = np.zeros(1000)
y[10] = np.inf
librosa.util.valid_audio(y)
def test_valid_audio_ndim():
'''valid_audio: y.ndim > 2'''
y = np.zeros((3, 10, 10))
@raises(librosa.ParameterError)
def __test(mono):
librosa.util.valid_audio(y, mono=mono)
for mono in [False, True]:
yield __test, mono
@raises(librosa.ParameterError)
def test_frame_hop():
'''frame: hop_length=0'''
y = np.zeros(128)
librosa.util.frame(y, frame_length=10, hop_length=0)
@raises(librosa.ParameterError)
def test_frame_discontiguous():
'''frame: discontiguous input'''
y = np.zeros((128, 2)).T
librosa.util.frame(y[0], frame_length=64, hop_length=64)
def test_frame_contiguous():
'''frame: discontiguous input'''
y = np.zeros((2, 128))
librosa.util.frame(y[0], frame_length=64, hop_length=64)
@raises(librosa.ParameterError)
def test_frame_size():
'''frame: len(y) == 128, frame_length==256, hop_length=128'''
y = np.zeros(64)
librosa.util.frame(y, frame_length=256, hop_length=128)
@raises(librosa.ParameterError)
def test_stft_bad_window():
y = np.zeros(22050 * 5)
n_fft = 2048
window = np.ones(n_fft // 2)
librosa.stft(y, n_fft=n_fft, window=window)
@raises(librosa.ParameterError)
def test_istft_bad_window():
D = np.zeros((1025, 10), dtype=np.complex64)
n_fft = 2 * (D.shape[0] - 1)
window = np.ones(n_fft // 2)
librosa.istft(D, window=window)
| isc |
toobaz/pandas | pandas/tests/api/test_api.py | 2 | 5198 | import pandas as pd
from pandas import api, compat
from pandas.util import testing as tm
class Base:
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted(f for f in dir(namespace) if not f.startswith("__"))
if ignored is not None:
result = sorted(list(set(result) - set(ignored)))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
class TestPDApi(Base):
# these are optionally imported based on testing
# & need to be ignored
ignored = ["tests", "locale", "conftest"]
# top-level sub-packages
lib = [
"api",
"arrays",
"compat",
"core",
"errors",
"pandas",
"plotting",
"test",
"testing",
"tseries",
"util",
"options",
"io",
]
# these are already deprecated; awaiting removal
deprecated_modules = []
# misc
misc = ["IndexSlice", "NaT"]
# top-level classes
classes = [
"Categorical",
"CategoricalIndex",
"DataFrame",
"DateOffset",
"DatetimeIndex",
"ExcelFile",
"ExcelWriter",
"Float64Index",
"Grouper",
"HDFStore",
"Index",
"Int64Index",
"MultiIndex",
"Period",
"PeriodIndex",
"RangeIndex",
"UInt64Index",
"Series",
"SparseArray",
"SparseDataFrame",
"SparseDtype",
"SparseSeries",
"Timedelta",
"TimedeltaIndex",
"Timestamp",
"Interval",
"IntervalIndex",
"CategoricalDtype",
"PeriodDtype",
"IntervalDtype",
"DatetimeTZDtype",
"Int8Dtype",
"Int16Dtype",
"Int32Dtype",
"Int64Dtype",
"UInt8Dtype",
"UInt16Dtype",
"UInt32Dtype",
"UInt64Dtype",
"NamedAgg",
]
if not compat.PY37:
classes.append("Panel")
# these are already deprecated; awaiting removal
deprecated_classes = []
# these should be deprecated in the future
deprecated_classes_in_future = []
# external modules exposed in pandas namespace
modules = ["np", "datetime"]
# top-level functions
funcs = [
"array",
"bdate_range",
"concat",
"crosstab",
"cut",
"date_range",
"interval_range",
"eval",
"factorize",
"get_dummies",
"infer_freq",
"isna",
"isnull",
"lreshape",
"melt",
"notna",
"notnull",
"offsets",
"merge",
"merge_ordered",
"merge_asof",
"period_range",
"pivot",
"pivot_table",
"qcut",
"show_versions",
"timedelta_range",
"unique",
"value_counts",
"wide_to_long",
]
# top-level option funcs
funcs_option = [
"reset_option",
"describe_option",
"get_option",
"option_context",
"set_option",
"set_eng_float_format",
]
# top-level read_* funcs
funcs_read = [
"read_clipboard",
"read_csv",
"read_excel",
"read_fwf",
"read_gbq",
"read_hdf",
"read_html",
"read_json",
"read_msgpack",
"read_pickle",
"read_sas",
"read_sql",
"read_sql_query",
"read_sql_table",
"read_stata",
"read_table",
"read_feather",
"read_parquet",
"read_spss",
]
# top-level to_* funcs
funcs_to = ["to_datetime", "to_msgpack", "to_numeric", "to_pickle", "to_timedelta"]
# top-level to deprecate in the future
deprecated_funcs_in_future = []
# these are already deprecated; awaiting removal
deprecated_funcs = []
# private modules in pandas namespace
private_modules = [
"_config",
"_hashtable",
"_lib",
"_libs",
"_np_version_under1p14",
"_np_version_under1p15",
"_np_version_under1p16",
"_np_version_under1p17",
"_tslib",
"_typing",
"_version",
]
def test_api(self):
self.check(
pd,
self.lib
+ self.misc
+ self.modules
+ self.deprecated_modules
+ self.classes
+ self.deprecated_classes
+ self.deprecated_classes_in_future
+ self.funcs
+ self.funcs_option
+ self.funcs_read
+ self.funcs_to
+ self.deprecated_funcs_in_future
+ self.deprecated_funcs
+ self.private_modules,
self.ignored,
)
class TestApi(Base):
allowed = ["types", "extensions"]
def test_api(self):
self.check(api, self.allowed)
class TestTesting(Base):
funcs = ["assert_frame_equal", "assert_series_equal", "assert_index_equal"]
def test_testing(self):
from pandas import testing
self.check(testing, self.funcs)
| bsd-3-clause |
automl/SpySMAC | cave/html/html_helpers.py | 1 | 3129 | from collections import OrderedDict
def figure_to_html(figure, prefix=None, max_in_a_row=None, true_break_between_rows=False):
""" Turns filepaths to nice html-figures
Parameters
----------
figure: Union[List[str], str]
path or list of paths
prefix: Union[None, str]
if set, the length of this string will be clipped from beginning
max_in_a_row: Union[None, int]
if set, insert a break after this many plots
true_break_between_rows: bool
if False, a simple <br> tag is inserted between rows; if True, the current div is closed and a new centered div is opened for the next row
Returns
-------
html: Union[str, List[str]]
html-code or list with independent html-codes
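Example (illustrative only; the file paths are hypothetical)
-------
>>> html = figure_to_html(["out/plots/a.png", "out/plots/b.png"],
... prefix="out", max_in_a_row=2) # doctest: +SKIP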
"""
if not prefix:
prefix = ""
if not max_in_a_row or max_in_a_row > len(figure):
max_in_a_row = len(figure)
alternative_text = "Plot missing - is phantomjs installed? Check CAVEs FAQ for infos."
div = "<div align=\"center\">\n"
if not figure:
return ""
elif isinstance(figure, str):
div += ("<a href=\"{0}\" data-lightbox=\"{0}\" "
"data-title=\"{0}\"><img src=\"{0}\" alt=\"{1}\" "
"width=\"600px\"></a>\n".format(figure[len(prefix):].lstrip("/"), alternative_text))
else:
# List with multiple figures size relative, put next to each other
width = (100 - len(figure)) / len(figure)
#width = (100 - max_in_a_row) / max_in_a_row
counter = 0
for fig in figure:
if counter == max_in_a_row:
if true_break_between_rows:
div += "<p style=\"clear: both;\">"
div += "</div>\n"
div += "<div align=\"center\">\n"
else:
div += " <br> "
counter = 0
div += "<a href=\"{0}\" data-lightbox=\"{1}\" data-title=\"{0}\"><img src=\"{0}\"".format(
fig[len(prefix):].lstrip("/"), str(figure))
div += " alt=\"{0}\" style=\"float: left; width: {1}%; margin-right: "\
"1%; margin-bottom: 0.5em;\"></a>\n".format(alternative_text, int(width))
counter += 1
div += "<p style=\"clear: both;\">"
div += "</div>\n"
return div
def _split_table(table: OrderedDict):
"""Splits an OrderedDict into a list of tuples that can be turned into a
HTML-table with pandas DataFrame
Parameters
----------
table: OrderedDict
table that is to be split into two columns
Returns
-------
table_split: List[tuple(key, value, key, value)]
list with two key-value pairs per entry that can be used by pandas
df.to_html()
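Example (illustrative, derived from the implementation below)
-------
>>> _split_table(OrderedDict([("a", 1), ("b", 2), ("c", 3), ("d", 4)]))
[('<b>a</b>', 1, '<b>c</b>', 3), ('<b>b</b>', 2, '<b>d</b>', 4)]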
"""
table_split = []
keys = list(table.keys())
half_size = len(keys) // 2
for i in range(half_size):
j = i + half_size
table_split.append(("<b>" + keys[i] + "</b>", table[keys[i]],
"<b>" + keys[j] + "</b>", table[keys[j]]))
if len(keys) % 2 == 1:
table_split.append(("<b>"+keys[-1]+"</b>", table[keys[-1]], '', ''))
return table_split
| bsd-3-clause |
Richert/BrainNetworks | CMC/config/AdEx_net.py | 1 | 2934 | from pyrates.utility import grid_search_annarchy, plot_timeseries
from ANNarchy import Projection, Population, TimedArray, setup, Network, Monitor, Uniform, Normal, \
EIF_cond_exp_isfa_ista
from pyrates.utility import pyrates_from_annarchy
import matplotlib.pyplot as plt
import numpy as np
# parameters
############
T = 1000.0 # simulation time (ms)
dt = 1e-2 # integration step-size (ms)
Ne = 100 # number of excitatory neurons
Ni = 100 # number of inhibitory neurons
c_min = 0.1
c_max = 1.0
# network definition
####################
setup(method='explicit', dt=dt)
# Neuron definition
neuron = EIF_cond_exp_isfa_ista()
neuron.equations = """
I = g_exc * (e_rev_E - v) + g_inh * (e_rev_I - v) + i_offset * Normal(0.2, 1.0)
tau_m * dv/dt = (v_rest - v + delta_T * exp((v-v_thresh)/delta_T)) + tau_m/cm*(I - w) : init=-70.6
tau_w * dw/dt = a * (v - v_rest) / 1000.0 - w
tau_syn_E * dg_exc/dt = - g_exc : exponential
tau_syn_I * dg_inh/dt = - g_inh : exponential
"""
# population setup
pop = Population(Ne + Ni, neuron=neuron)
E = pop[:Ne]
I = pop[Ne:]
# projection setup
C_ei = Projection(pre=E, post=I, target='exc', name='EI')
C_ie = Projection(pre=I, post=E, target='inh', name='IE')
#C_ee = Projection(E, E, 'exc', name='EE')
#C_ii = Projection(I, I, 'inh', name='II')
C_ei.connect_fixed_probability(0.1, weights=Uniform(c_min, c_max))
C_ie.connect_fixed_probability(0.1, weights=Uniform(c_min, c_max))
#C_ee.connect_fixed_probability(0.3, weights=Uniform(c_min, c_max))
#C_ii.connect_fixed_probability(0.3, weights=Uniform(c_min, c_max))
# input
#steps = int(T/dt)
#I_e_tmp = 5.0 + np.random.randn(steps, Ne) * 50.0 * np.sqrt(dt) # input current for excitatory neurons
#I_i_tmp = 4.0 + np.random.randn(steps, Ni) * 44.0 * np.sqrt(dt) # input current for inhibitory neurons
#I_e = TimedArray(rates=I_e_tmp, name="E_inp")
#I_i = TimedArray(rates=I_i_tmp, name="I_inp")
#inp_e = Projection(pre=I_e, post=E, target='exc')
#inp_i = Projection(pre=I_i, post=I, target='exc')
#inp_e.connect_one_to_one(1.0)
#inp_i.connect_one_to_one(1.0)
E.i_offset = 5.0
I.i_offset = 2.0
# monitoring
obs_e = Monitor(E, variables=['spike', 'v'], start=True)
obs_i = Monitor(I, variables=['spike', 'v'], start=True)
# simulation
############
# annarchy simulation
net = Network(everything=True)
net.compile()
net.simulate(duration=T)
# conversion to pyrates
rate_e = pyrates_from_annarchy(monitors=[net.get(obs_e)], vars=['spike'], pop_average=True)
rate_i = pyrates_from_annarchy(monitors=[net.get(obs_i)], vars=['spike'], pop_average=True)
v_e = pyrates_from_annarchy(monitors=[net.get(obs_e)], vars=['v'], pop_average=False)
v_i = pyrates_from_annarchy(monitors=[net.get(obs_i)], vars=['v'], pop_average=False)
# visualization
###############
plt.plot(rate_e)
plt.plot(rate_i)
plt.figure()
plt.plot(v_e)
plt.figure()
plt.plot(v_i)
plt.show()
| apache-2.0 |
Diyago/Machine-Learning-scripts | time series regression/ARIMA/ARMA_IBMstock.py | 1 | 1527 | # Load modules
from __future__ import print_function
import pandas as pd
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import statsmodels.tsa.api as smtsa
import statsmodels.api as sm
import os
#############
# IBM EXAMPLE for ARMA modelling
#############
# Load Dataset
ibm_df = pd.read_csv("datasets/ibm-common-stock-closing-prices.csv")
ibm_df.head()
# Rename the second column
ibm_df.rename(columns={"IBM common stock closing prices": "Close_Price"}, inplace=True)
ibm_df.head()
ibm_df.Close_Price.plot()
# Plot ACF and PACF
ibm_df = ibm_df.dropna()
plot_acf(ibm_df.Close_Price, lags=50)
plot_pacf(ibm_df.Close_Price, lags=50)
# QQ plot and probability plot
sm.qqplot(ibm_df["Close_Price"], line="s")
# Optimize ARMA parameters
aicVal = []
for ari in range(1, 3):
for maj in range(0, 3):
arma_obj = smtsa.ARMA(ibm_df.Close_Price.tolist(), order=(ari, maj)).fit(
maxlag=30, method="mle", trend="nc"
)
aicVal.append([ari, maj, arma_obj.aic])
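# Added sketch (not part of the original script): aicVal now holds [p, q, AIC]
# rows, so the order could be picked programmatically instead of hard-coding
# (1, 0) below, e.g.
#   best_p, best_q, best_aic = min(aicVal, key=lambda row: row[2])
#   print("Lowest-AIC ARMA order: ({0}, {1})".format(best_p, best_q))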
arma_obj_fin = smtsa.ARMA(ibm_df.Close_Price.tolist(), order=(1, 0)).fit(
maxlag=30, method="mle", trend="nc"
)
ibm_df["ARMA"] = arma_obj_fin.predict()
arma_obj_fin.summary()
# Plot the curves
f, axarr = plt.subplots(1, sharex=True)
f.set_size_inches(5.5, 5.5)
ibm_df["Close_Price"].iloc[1:].plot(color="b", linestyle="-", ax=axarr)
ibm_df["ARMA"].iloc[1:].plot(color="r", linestyle="--", ax=axarr)
axarr.set_title("ARMA(1,0)")
plt.xlabel("Index")
plt.ylabel("Closing price")
| apache-2.0 |
mingkaijiang/eucface_cnp | scripts/translate_GDAY_output_to_EUCFACE_format.py | 1 | 25960 | #!/usr/bin/env python
# coding: utf-8
""" Translate GDAY output file
Match the NCEAS format and, while we are at it, carry out unit conversion so
that we match the required standard. Data should be comma-delimited.
"""
import shutil
import os
import numpy as np
import csv
import sys
#import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
#from io import StringIO
from io import BytesIO
__author__ = "Martin De Kauwe"
__version__ = "1.0 (12.05.2014)"
__email__ = "[email protected]"
def date_converter(*args):
return dt.datetime.strptime(str(int(float(args[0]))) + " " +\
str(int(float(args[1]))), '%Y %j')
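# Example (added, illustrative only): date_converter("2013.0", "32.0") parses
# "2013 32" with '%Y %j' and returns datetime.datetime(2013, 2, 1, 0, 0).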
def translate_output(infname, met_fname):
outdir = "../outputs"
UNDEF = -9999.
units = setup_units()
variable, variable_names = setup_varnames()
# load met stuff, i.e. the stuff needed for NCEAS output that G'day
# does not output
envir = load_met_input_data(met_fname)
# load the rest of the g'day output
(gday, git_ver) = load_gday_output(infname)
# merge dictionaries to ease output
data_dict = dict(envir, **gday)
ofname = os.path.join(outdir, "temp.nceas")
f = open(ofname, "w")
f.write("%s" % (git_ver))
# write output in csv format
writer = csv.writer(f, dialect=csv.excel, lineterminator="\n")
writer.writerow(variable)
writer.writerow(units)
writer.writerow(variable_names)
for i in range(len(gday['DOY'])):
writer.writerow([("%.8f" % (float(data_dict[k][i])) \
if k in data_dict else UNDEF)
for k in variable_names])
# Need to replace the temp file with the infname which is actually
# the filename we want to use
shutil.move(ofname, infname)
def remove_comments_from_header(fname):
""" I have made files with comments which means the headings can't be
parsed to get dictionary headers for pandas! Solution is to remove these
comments first """
#s = StringIO()
s = BytesIO()
with open(fname) as f:
for line in f:
if '#' in line:
line = line.replace("#", "").lstrip(' ')
s.write(line)
s.seek(0) # "rewind" to the beginning of the StringIO object
return s
def remove_comments_from_header_and_get_git_rev(fname):
""" I have made files with comments which means the headings can't be
parsed to get dictionary headers for pandas! Solution is to remove these
comments first """
#s = StringIO()
s = BytesIO()
with open(fname) as f:
line_counter = 0
for line in f:
if line_counter == 0:
git_ver = line.rstrip(' ')
if '#' in line:
line = line.replace("#", "").lstrip(' ')
s.write(line)
line_counter += 1
s.seek(0) # "rewind" to the beginning of the StringIO object
return s, git_ver
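# Note (added): this helper differs from remove_comments_from_header() above
# only in that it also captures the first line of the file (the git revision
# stamp written at the top of the GDAY output) and returns it alongside the
# cleaned buffer.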
def load_met_input_data(fname):
MJ_TO_MOL = 4.6
SW_TO_PAR = 0.48
DAYS_TO_HRS = 24.0
UMOL_TO_MOL = 1E-6
tonnes_per_ha_to_g_m2 = 100.0
s = remove_comments_from_header(fname)
met_data = pd.read_csv(s, parse_dates=[[0,1]], skiprows=4, index_col=0,
sep=",", keep_date_col=True,
date_parser=date_converter)
precip = met_data["rain"]
par = (met_data["par_am"] + met_data["par_pm"]) * MJ_TO_MOL
air_temp = met_data["tair"]
soil_temp = met_data["tsoil"]
vpd = (met_data["vpd_am"] + met_data["vpd_pm"]) / 2.0
co2 = met_data["co2"]
ndep = met_data["ndep"] * tonnes_per_ha_to_g_m2
return {'CO2': co2, 'PREC':precip, 'PAR':par, 'TAIR':air_temp, 'TSOIL':soil_temp,
'VPD':vpd, 'NDEP':ndep}
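# Note (added): load_met_input_data() converts PAR from MJ m-2 to mol m-2
# (MJ_TO_MOL = 4.6), averages the am/pm VPD columns and rescales N deposition
# from t ha-1 to g m-2 (factor 100); the remaining met columns are passed
# through unchanged.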
def load_gday_output(fname):
SW_RAD_TO_PAR = 2.3
UNDEF = -9999.
tonnes_per_ha_to_g_m2 = 100
yr_to_day = 365.25
(s, git_ver) = remove_comments_from_header_and_get_git_rev(fname)
out = pd.read_csv(s, parse_dates=[[0,1]], skiprows=1, index_col=0,
sep=",", keep_date_col=True, date_parser=date_converter)
year = out["year"]
doy = out["doy"]
# state outputs
pawater_root = out["pawater_root"]
shoot = out["shoot"] * tonnes_per_ha_to_g_m2
stem = out["stem"] * tonnes_per_ha_to_g_m2
branch = out["branch"] * tonnes_per_ha_to_g_m2
fine_root = out["root"] * tonnes_per_ha_to_g_m2
coarse_root = out["croot"] * tonnes_per_ha_to_g_m2
coarse_rootn = out["crootn"] * tonnes_per_ha_to_g_m2
litterc = out["litterc"] * tonnes_per_ha_to_g_m2
littercag = out["littercag"] * tonnes_per_ha_to_g_m2
littercbg = out["littercbg"] * tonnes_per_ha_to_g_m2
soilc = out["soilc"] * tonnes_per_ha_to_g_m2
lai = out["lai"]
shootn = out["shootn"] * tonnes_per_ha_to_g_m2
stemn = out["stemn"] * tonnes_per_ha_to_g_m2
branchn = out["branchn"] * tonnes_per_ha_to_g_m2
rootn = out["rootn"] * tonnes_per_ha_to_g_m2
crootn = out["crootn"] * tonnes_per_ha_to_g_m2
litternag = out["litternag"] * tonnes_per_ha_to_g_m2
litternbg = out["litternbg"] * tonnes_per_ha_to_g_m2
nsoil = out["soiln"] * tonnes_per_ha_to_g_m2
inorgn = out["inorgn"] * tonnes_per_ha_to_g_m2
tnc = out["cstore"] * tonnes_per_ha_to_g_m2
nstorage = out["nstore"] * tonnes_per_ha_to_g_m2
pstorage = out["pstore"] * tonnes_per_ha_to_g_m2
activesoiln = out["activesoiln"] * tonnes_per_ha_to_g_m2
slowsoiln = out["slowsoiln"] * tonnes_per_ha_to_g_m2
passivesoiln = out["passivesoiln"] * tonnes_per_ha_to_g_m2
npoolo = activesoiln + slowsoiln + passivesoiln
shootp = out["shootp"] * tonnes_per_ha_to_g_m2
stemp = out["stemp"] * tonnes_per_ha_to_g_m2
branchp = out["branchp"] * tonnes_per_ha_to_g_m2
rootp = out["rootp"] * tonnes_per_ha_to_g_m2
crootp = out["crootp"] * tonnes_per_ha_to_g_m2
litterpag = out["litterpag"] * tonnes_per_ha_to_g_m2
litterpbg = out["litterpbg"] * tonnes_per_ha_to_g_m2
psoil = out["soilp"] * tonnes_per_ha_to_g_m2
inorgp = out["inorgp"] * tonnes_per_ha_to_g_m2
inorglabp = out["inorglabp"] * tonnes_per_ha_to_g_m2
inorgsorbp = out["inorgsorbp"] * tonnes_per_ha_to_g_m2
inorgavlp = out["inorgavlp"] * tonnes_per_ha_to_g_m2
inorgssorbp = out["inorgssorbp"] * tonnes_per_ha_to_g_m2
inorgoccp = out["inorgoccp"] * tonnes_per_ha_to_g_m2
inorgparp = out["inorgparp"] * tonnes_per_ha_to_g_m2
activesoilp = out["activesoilp"] * tonnes_per_ha_to_g_m2
slowsoilp = out["slowsoilp"] * tonnes_per_ha_to_g_m2
passivesoilp = out["passivesoilp"] * tonnes_per_ha_to_g_m2
ppoolo = activesoilp + slowsoilp + passivesoilp
# fluxes outputs
beta = out["wtfac_root"]
nep = out["nep"] * tonnes_per_ha_to_g_m2
gpp = out["gpp"] * tonnes_per_ha_to_g_m2
npp = out["npp"] * tonnes_per_ha_to_g_m2
rh = out["hetero_resp"] * tonnes_per_ha_to_g_m2
ra = out["auto_resp"] * tonnes_per_ha_to_g_m2
et = out["et"] # mm of water' are same value as kg/m2
trans = out["transpiration"] # mm of water' are same value as kg/m2
soil_evap = out["soil_evap"] # mm of water' are same value as kg/m2
can_evap = out["canopy_evap"] # mm of water' are same value as kg/m2
runoff = out["runoff"] # mm of water' are same value as kg/m2
gl = out["cpleaf"] * tonnes_per_ha_to_g_m2
# gw summed from cpstem and cpbranch below
cpstem = out["cpstem"] * tonnes_per_ha_to_g_m2
cpbranch = out["cpbranch"] * tonnes_per_ha_to_g_m2
gr = out["cproot"] * tonnes_per_ha_to_g_m2
gcr = out["cpcroot"] * tonnes_per_ha_to_g_m2
deadleaves = out["deadleaves"] * tonnes_per_ha_to_g_m2
deadroots = out["deadroots"] * tonnes_per_ha_to_g_m2
deadcroots = out["deadcroots"] * tonnes_per_ha_to_g_m2
deadbranch = out["deadbranch"] * tonnes_per_ha_to_g_m2
deadstems = out["deadstems"] * tonnes_per_ha_to_g_m2
deadleafn = out["deadleafn"] * tonnes_per_ha_to_g_m2
deadbranchn = out["deadbranchn"] * tonnes_per_ha_to_g_m2
deadstemn = out["deadstemn"] * tonnes_per_ha_to_g_m2
deadrootn = out["deadrootn"] * tonnes_per_ha_to_g_m2
deadcrootn = out["deadcrootn"] * tonnes_per_ha_to_g_m2
nup = out["nuptake"] * tonnes_per_ha_to_g_m2
ngross = out["ngross"] * tonnes_per_ha_to_g_m2
nmin = out["nmineralisation"] * tonnes_per_ha_to_g_m2
npleaf = out["npleaf"] * tonnes_per_ha_to_g_m2
nproot = out["nproot"] * tonnes_per_ha_to_g_m2
npcroot = out["npcroot"] * tonnes_per_ha_to_g_m2
npstemimm = out["npstemimm"] * tonnes_per_ha_to_g_m2
npstemmob = out["npstemmob"] * tonnes_per_ha_to_g_m2
npbranch = out["npbranch"] * tonnes_per_ha_to_g_m2
apar = out["apar"] / SW_RAD_TO_PAR
gcd = out["gs_mol_m2_sec"]
ga = out["ga_mol_m2_sec"]
nleach = out["nloss"] * tonnes_per_ha_to_g_m2
activesoil = out["activesoil"] * tonnes_per_ha_to_g_m2
slowsoil = out["slowsoil"] * tonnes_per_ha_to_g_m2
passivesoil = out["passivesoil"] * tonnes_per_ha_to_g_m2
cfretransn = out["leafretransn"] * tonnes_per_ha_to_g_m2
deadleafp = out["deadleafp"] * tonnes_per_ha_to_g_m2
deadbranchp = out["deadbranchp"] * tonnes_per_ha_to_g_m2
deadstemp = out["deadstemp"] * tonnes_per_ha_to_g_m2
deadrootp = out["deadrootp"] * tonnes_per_ha_to_g_m2
deadcrootp = out["deadcrootp"] * tonnes_per_ha_to_g_m2
pup = out["puptake"] * tonnes_per_ha_to_g_m2
pgross = out["pgross"] * tonnes_per_ha_to_g_m2
pmin = out["pmineralisation"] * tonnes_per_ha_to_g_m2
ppleaf = out["ppleaf"] * tonnes_per_ha_to_g_m2
pproot = out["pproot"] * tonnes_per_ha_to_g_m2
ppcroot = out["ppcroot"] * tonnes_per_ha_to_g_m2
ppstemimm = out["ppstemimm"] * tonnes_per_ha_to_g_m2
ppstemmob = out["ppstemmob"] * tonnes_per_ha_to_g_m2
ppbranch = out["ppbranch"] * tonnes_per_ha_to_g_m2
pleach = out["ploss"] * tonnes_per_ha_to_g_m2
cfretransp = out["leafretransp"] * tonnes_per_ha_to_g_m2
# extra traceability stuff
tfac_soil_decomp = out["tfac_soil_decomp"]
c_into_active = out["c_into_active"] * tonnes_per_ha_to_g_m2
c_into_slow = out["c_into_slow"] * tonnes_per_ha_to_g_m2
c_into_passive = out["c_into_passive"] * tonnes_per_ha_to_g_m2
active_to_slow = out["active_to_slow"] * tonnes_per_ha_to_g_m2
active_to_passive = out["active_to_passive"] * tonnes_per_ha_to_g_m2
slow_to_active = out["slow_to_active"] * tonnes_per_ha_to_g_m2
slow_to_passive = out["slow_to_passive"] * tonnes_per_ha_to_g_m2
passive_to_active = out["passive_to_active"] * tonnes_per_ha_to_g_m2
co2_rel_from_surf_struct_litter = out["co2_rel_from_surf_struct_litter"] * tonnes_per_ha_to_g_m2
co2_rel_from_soil_struct_litter = out["co2_rel_from_soil_struct_litter"] * tonnes_per_ha_to_g_m2
co2_rel_from_surf_metab_litter = out["co2_rel_from_surf_metab_litter"] * tonnes_per_ha_to_g_m2
co2_rel_from_soil_metab_litter = out["co2_rel_from_soil_metab_litter"] * tonnes_per_ha_to_g_m2
co2_rel_from_active_pool = out["co2_rel_from_active_pool"] * tonnes_per_ha_to_g_m2
co2_rel_from_slow_pool = out["co2_rel_from_slow_pool"] * tonnes_per_ha_to_g_m2
co2_rel_from_passive_pool = out["co2_rel_from_passive_pool"] * tonnes_per_ha_to_g_m2
# extra priming stuff
rexc = [UNDEF] * len(doy)
rexn = [UNDEF] * len(doy)
co2x = [UNDEF] * len(doy)
factive = [UNDEF] * len(doy)
rtslow = [UNDEF] * len(doy)
rexcue = [UNDEF] * len(doy)
cslo = out["slowsoil"] * tonnes_per_ha_to_g_m2
nslo = out["slowsoiln"] * tonnes_per_ha_to_g_m2
cact = out["activesoil"] * tonnes_per_ha_to_g_m2
nact = out["activesoiln"] * tonnes_per_ha_to_g_m2
# Misc stuff we don't output
drainage = [UNDEF] * len(doy)
rleaf = [UNDEF] * len(doy)
rwood = [UNDEF] * len(doy)
rcr = [UNDEF] * len(doy)
rfr = [UNDEF] * len(doy)
rgrow = [UNDEF] * len(doy)
rsoil = [UNDEF] * len(doy)
cex = [UNDEF] * len(doy)
cvoc = [UNDEF] * len(doy)
lh = [UNDEF] * len(doy)
sh = [UNDEF] * len(doy)
ccoarse_lit = [UNDEF] * len(doy)
ndw = [UNDEF] * len(doy)
pclitb = [UNDEF] * len(doy)
nvol = [UNDEF] * len(doy)
gb = [UNDEF] * len(doy)
grepr = [UNDEF] * len(doy)
cwretransn = [UNDEF] * len(doy)
ccrretransn = [UNDEF] * len(doy)
cfrretransn = [UNDEF] * len(doy)
plretr = [UNDEF] * len(doy)
pwretr = [UNDEF] * len(doy)
pcrretr = [UNDEF] * len(doy)
pfrretr = [UNDEF] * len(doy)
# Misc calcs from fluxes/state
lma = shoot / lai
ncon = shootn / shoot
nflit = litternag + litternbg
pflit = litterpag + litterpbg
pcon = shootp / shoot
recosys = rh + ra
secp = inorgsorbp + inorgssorbp
cw = stem + branch
cwp = stemp + branchp
gw = cpstem + cpbranch
cwn = stemn + branchn
cwin = deadstems + deadbranch
ccrlin = deadcroots
cfrlin = deadroots
ndeadwood = deadbranchn + deadstemn
pdeadwood = deadbranchp + deadstemp
nwood_growth = npstemimm + npstemmob + npbranch
pwood_growth = ppstemimm + ppstemmob + ppbranch
return {'YEAR':year, 'DOY':doy, 'SW':pawater_root, 'SWPA':pawater_root,
'NEP':nep, 'GPP':gpp, 'NPP':npp, 'CEX':cex, 'CVOC':cvoc,
'RECO':recosys, 'RAU':ra, 'RL':rleaf, 'RW':rwood,
'RCR':rcr, 'RFR':rfr,
'RGR':rgrow, 'RHET':rh, 'RSOIL':rsoil, 'ET':et, 'T':trans,
'ES':soil_evap, 'EC':can_evap, 'RO':runoff, 'DRAIN':drainage,
'LE':lh, 'SH':sh, 'CL':shoot, 'CW':cw, 'CCR':coarse_root,
'CFR':fine_root, 'CSTOR':tnc, 'CFLIT':litterc, 'CFLITA':littercag,
'CFLITB':littercbg, 'CCLITB':ccoarse_lit, 'CSOIL':soilc,
'CGL':gl, 'CGW':gw, 'CGCR':gcr, 'CGFR':gr, 'CREPR':grepr, 'CLITIN':deadleaves,
'CCRLIN':ccrlin, 'CFRLIN':cfrlin, 'CWLIN':cwin, 'LAI':lai, 'LMA':lma, 'NCON':ncon,
'NL':shootn, 'NW':cwn, 'NCR':coarse_rootn, 'NFR':rootn,
'NSTOR':nstorage, 'NFLIT': nflit, 'NFLITA':litternag, 'NFLITB':litternbg, 'NCLITB':ndw,
'NSOIL':nsoil, 'NPMIN':inorgn, 'NPORG':npoolo,
'NGL':npleaf, 'NGW':nwood_growth, 'NGCR':npcroot, 'NGFR':nproot,
'NLITIN':deadleafn, 'NCRLIN':deadcrootn,
'NFRLIN':deadrootn, 'NWLIN':ndeadwood, 'NUP':nup,
'NGMIN':ngross, 'NMIN':nmin, 'NVOL': nvol, 'NLEACH':nleach,
'NLRETR':cfretransn, 'NWRETR':cwretransn,
'NCRRETR':ccrretransn, 'NFRRETR':cfrretransn,
'APARd':apar, 'GCd':gcd, 'GAd':ga, 'Gbd':gb, 'Betad':beta,
'PL':shootp, 'PW':cwp,
'PCR':crootp, 'PFR':rootp,
'PSTOR':pstorage, 'PFLIT':pflit,
'PFLITA':litterpag, 'PFLITB':litterpbg, 'PCLITB':pclitb,
'PSOIL':psoil, 'PLAB':inorglabp,
'PSEC':secp, 'POCC':inorgoccp,
'PPAR':inorgparp,
'PPMIN':inorgp, 'PPORG':ppoolo,
'PLITIN':deadleafp, 'PCRLIN':deadcrootp,
'PFRLIN':deadrootp, 'PWLIN':pdeadwood, 'PUP':pup,
'PGMIN':pgross, 'PMIN':pmin, 'PLEACH':pleach,
'PGL':ppleaf, 'PGW':pwood_growth, 'PGCR':ppcroot, 'PGFR':pproot,
            'PLRETR':cfretransp, 'PWRETR':pwretr, 'PCRRETR':pcrretr, 'PFRRETR':pfrretr,
'CTOACTIVE':c_into_active, 'CTOSLOW':c_into_slow,
'CTOPASSIVE':c_into_passive, 'CACTIVETOSLOW':active_to_slow,
'CACTIVETOPASSIVE':active_to_passive, 'CSLOWTOACTIVE':slow_to_active,
'CSLOWTOPASSIVE':slow_to_passive, 'CPASSIVETOACTIVE':passive_to_active,
'CACTIVE':activesoil, 'CSLOW':slowsoil, 'CPASSIVE':passivesoil,
'CO2SLITSURF':co2_rel_from_surf_struct_litter,
'CO2SLITSOIL':co2_rel_from_soil_struct_litter,
'CO2MLITSURF':co2_rel_from_surf_metab_litter,
'CO2MLITSOIL':co2_rel_from_soil_metab_litter,
'CO2FSOM':co2_rel_from_active_pool,
'CO2SSOM':co2_rel_from_slow_pool,
'CO2PSOM':co2_rel_from_passive_pool,
'TFACSOM':tfac_soil_decomp,
'REXC':rexc,
'REXN':rexn,
'CO2X':co2x,
'FACTIVE':factive,
'RTSLOW':rtslow,
'REXCUE':rexcue,
'CSLO':cslo,
'NSLO':nslo,
'CACT':cact,
'NACT':nact}, git_ver
def setup_units():
units = ['--','--','Mean ppm', 'mm d-1', 'mol m-2', 'Mean DegC', 'Mean DegC',
'kPa h', 'mm', 'mm', 'gN m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'kgH2O m-2 d-1',
'kgH2O m-2 d-1', 'kgH2O m-2 d-1', 'kgH2O m-2 d-1',
'kgH2O m-2 d-1', 'kgH2O m-2 d-1', 'MJ m-2', 'MJ m-2',
'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2',
'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2 0 to 30 cm',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'm2 m-2', 'gC m-2',
'gN gd.m.-1', 'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2',
'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2 0 to 30 cm',
'gN m-2 0 to 30 cm', 'gN m-2 0 to 30 cm', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1',
'MJ m-2 d-1', 'mol H2O m-2 s-1', 'mol H2O m-2 s-1',
'mol H2O m-2 s-1', 'frac',
'gP m-2', 'gP m-2', 'gP m-2',
'gP m-2', 'gP m-2', 'gP m-2',
'gP m-2', 'gP m-2', 'gP m-2',
'gP m-2', 'gP m-2',
'gP m-2', 'gP m-2',
'gP m-2', 'gP m-2',
'gP m-2','gP m-2 d-1', 'gP m-2 d-1',
'gP m-2 d-1','gP m-2 d-1', 'gP m-2 d-1',
'gP m-2 d-1', 'gP m-2 d-1', 'gP m-2 d-1',
'gP m-2 d-1', 'gP m-2 d-1', 'gP m-2 d-1', 'gP m-2 d-1',
'gP m-2 d-1', 'gP m-2 d-1', 'gP m-2 d-1', 'gP m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2', 'gC m-2', 'gC m-2',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'frac', 'gC m-2 d-1', 'gN m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'years', 'frac', 'gC m-2 d-1', 'gN m-2 d-1',
'gC m-2 d-1', 'gN m-2 d-1']
return units
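# Note (added): the units list above is positional - it has to stay aligned,
# entry for entry, with the variable/variable_names lists returned by
# setup_varnames() below, because translate_output() writes the three lists as
# parallel header rows of the NCEAS csv.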
def setup_varnames():
variable = ['Year', 'Day of the year', 'CO2', 'Precipitation', 'PAR',
'Air temp canopy', 'Soil temp 10 cm', 'Vapour Pres Def',
'Total soil water content', 'Plant available soil water content',
'N deposition', 'Net Eco Prod',
'Gross Prim Prod', 'Net Prim Prod', 'C exudation',
'C VOC Flux', 'Resp ecosystem', 'Resp autotrophic',
'Resp leaves (maint)', 'Resp Wood (maint)',
'Resp coarse root (maint)',
'Resp Fine Root (maint)', 'Resp growth',
'Resp heterotrophic',
'Evapotranspiration', 'Transpiration', 'Soil Evaporation',
'Canopy evaporation', 'Runoff', 'Drainage', 'Latent Energy',
'Sensible Heat', 'C Leaf Mass', 'C Wood Mass',
'C Coarse Root mass', 'C Fine Root mass',
'C Storage as TNC', 'C Fine Litter Total',
'C Fine Litter above', 'C Fine Litter below',
'C Coarse Litter', 'C Soil', 'C Leaf growth',
'C Wood growth', 'C Coarse Root growth',
'C Fine Root growth', 'C reproduction growth',
'C Leaf Litterfall',
'C Coarse Root litter inputs', 'C Fine Root litter inputs',
'C Wood/branch inputs',
'LAI projected', 'Leaf gC/leaf area', 'N Conc Leaves',
'N Mass Leaves', 'N Mass Wood', 'N Mass Coarse Roots',
'N Mass Fine Roots', 'N storage', 'N fine litter total', 'N litter aboveground',
'N litter belowground', 'N Dead wood', 'N Soil Total',
'N in Mineral form', 'N in Organic form', 'N fixation',
'N Leaf growth', 'N Wood growth', 'N CR growth', 'N Fine Root growth',
'N Leaf Litterfall',
'N Coarse Root litter input', 'N Fine Root litter input',
'N Wood/brch litterfall', 'N Biomass Uptake',
'N Gross Mineralization', 'N Net mineralization',
'N Volatilization', 'N Leaching',
'Foliage retranslocation',
'Wood/Branch retranslocation', 'Coarse Root retranslocation',
'Fine Root retranslocation',
'Aborbed PAR', 'Average daytime canopy conductance',
'Average daytime aerodynamic conductance',
'Average daytime leaf boundary conductance',
'Soil moisture stress',
'P Mass Leaves',
'P Mass Wood', 'P Mass Coarse Roots', 'P Mass Fine Roots',
'P storage', 'P litter total', 'P litter aboveground', 'P litter belowground',
'P coarse litter',
'P Soil Total', 'P in labile form',
'P in secondary form',
'P in occluded form', 'P parent pool',
'P Inorganic pool',
'P in Organic form','P Leaf Litterfall',
'P Coarse Root litter input','P Fine Root litter input', 'P Wood/brch litterfall',
'P Biomass Uptake',
'P Gross Mineralisation', 'P Net mineralisation', 'P Leaching',
'P Leaf growth', 'P Wood growth', 'P CR growth', 'P Fine Root growth',
'P Foliage retranslocation',
'P Wood/Branch retranslocation', 'P Coarse Root retranslocation',
'P Fine Root retranslocation',
'C fluxes from litter & slow/passive to active soil pool',
'C fluxes from litter & active soil pool to slow pool',
'C fluxes from active & slow soil pool to passive pool',
'C flux from active soil pool to slow soil pool',
'C flux from active soil pool to passive soil pool',
'C flux from slow soil pool to active soil pool',
'C flux from slow pool to passive soil pool',
'C flux from passive pool to active pool',
'C Active SOM pool',
'C Slow SOM pool',
'C Passive SOM pool',
'CO2 efflux from surf structural litter',
'CO2 efflux from soil structural litter',
'CO2 efflux from surf metabolic litter',
'CO2 efflux from soil metabolic litter',
'CO2 efflux from fast SOM pool',
'CO2 efflux from slow SOM pool',
'CO2 efflux from passive SOM pool',
'Temperature scalar on C efflux from SOM pools',
'Root Exudation of C',
'Root Exudation of N',
'CO2 released from exudation',
'Total C flux from the active pool',
'Residence time of slow pool',
'REXC carbon use efficiency',
'Total C in the slow pool',
'Total N in the slow pool',
'Total C in the active pool',
'Total N in the active pool']
variable_names = ['YEAR', 'DOY', 'CO2', 'PREC', 'PAR', 'TAIR', 'TSOIL', 'VPD',
'SW', 'SWPA', 'NDEP', 'NEP', 'GPP', 'NPP', 'CEX', 'CVOC',
'RECO', 'RAU', 'RL', 'RW', 'RCR', 'RFR', 'RGR',
'RHET',
'ET', 'T', 'ES', 'EC', 'RO', 'DRAIN', 'LE', 'SH',
'CL', 'CW', 'CCR', 'CFR', 'CSTOR', 'CFLIT', 'CFLITA',
'CFLITB', 'CCLITB', 'CSOIL', 'CGL', 'CGW', 'CGCR', 'CGFR',
'CREPR','CLITIN', 'CCRLIN', 'CFRLIN','CWLIN', 'LAI',
'LMA', 'NCON', 'NL', 'NW', 'NCR', 'NFR', 'NSTOR',
'NFLIT', 'NFLITA','NFLITB', 'NCLITB', 'NSOIL', 'NPMIN', 'NPORG', 'NFIX',
'NGL', 'NGW', 'NGCR', 'NGFR',
'NLITIN', 'NCRLIN', 'NFRLIN','NWLIN', 'NUP', 'NGMIN', 'NMIN',
'NVOL', 'NLEACH', 'NLRETR', 'NWRETR',
'NCRRETR', 'NFRRETR', 'APARd',
'GCd', 'GAd', 'GBd', 'Betad',
'PL', 'PW',
'PCR', 'PFR','PSTOR',
'PFLIT', 'PFLITA', 'PFLITB', 'PCLITB',
'PSOIL', 'PLAB',
'PSEC', 'POCC',
'PPAR',
'PPMIN', 'PPORG',
'PLITIN', 'PCRLIN',
'PFRLIN', 'PWLIN', 'PUP',
'PGMIN', 'PMIN', 'PLEACH',
'PGL', 'PGW', 'PGCR', 'PGFR',
'PLRETR', 'PWRETR', 'PCRRETR', 'PFRRETR',
'CTOACTIVE', 'CTOSLOW', 'CTOPASSIVE', 'CACTIVETOSLOW',
'CACTIVETOPASSIVE', 'CSLOWTOACTIVE', 'CSLOWTOPASSIVE',
'CPASSIVETOACTIVE', 'CACTIVE', 'CSLOW', 'CPASSIVE',
'CO2SLITSURF', 'CO2SLITSOIL', 'CO2MLITSURF',
'CO2MLITSOIL', 'CO2FSOM', 'CO2SSOM', 'CO2PSOM',
'TFACSOM','REXC','REXN','CO2X','FACTIVE','RTSLOW','REXCUE',
'CSLO','NSLO','CACT','NACT']
return variable, variable_names
| gpl-3.0 |
adammenges/statsmodels | statsmodels/examples/ex_scatter_ellipse.py | 39 | 1367 | '''example for grid of scatter plots with probability ellipses
Author: Josef Perktold
License: BSD-3
'''
from statsmodels.compat.python import lrange
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.plot_grids import scatter_ellipse
nvars = 6
mmean = np.arange(1.,nvars+1)/nvars * 1.5
rho = 0.5
#dcorr = rho*np.ones((nvars, nvars)) + (1-rho)*np.eye(nvars)
r = np.random.uniform(-0.99, 0.99, size=(nvars, nvars))
##from scipy import stats
##r = stats.rdist.rvs(1, size=(nvars, nvars))
r = (r + r.T) / 2.
assert np.allclose(r, r.T)
mcorr = r
mcorr[lrange(nvars), lrange(nvars)] = 1
#dcorr = np.array([[1, 0.5, 0.1],[0.5, 1, -0.2], [0.1, -0.2, 1]])
mstd = np.arange(1.,nvars+1)/nvars
mcov = mcorr * np.outer(mstd, mstd)
evals = np.linalg.eigvalsh(mcov)
assert evals.min() > 0  # assert positive definite
nobs = 100
data = np.random.multivariate_normal(mmean, mcov, size=nobs)
dmean = data.mean(0)
dcov = np.cov(data, rowvar=0)
print(dmean)
print(dcov)
dcorr = np.corrcoef(data, rowvar=0)
dcorr[np.triu_indices(nvars)] = 0
print(dcorr)
#default
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95])
#used for checking
#fig = scatter_ellipse(data, level=[0.5, 0.75, 0.95], add_titles=True, keep_ticks=True)
#check varnames
varnames = ['var%d' % i for i in range(nvars)]
fig = scatter_ellipse(data, level=0.9, varnames=varnames)
plt.show()
| bsd-3-clause |
imrehg/labhardware | projects/signaltransfer2/signalmeasure.py | 2 | 4174 | """
Transferring data from Stanford Research SR785 Signal analyzer
"""
import ConfigParser
import numpy as np
import sys
from time import strftime, sleep, time
## For Windows:
import matplotlib
matplotlib.rcParams['backend'] = 'wx'
import matplotlib.pylab as pl
# Own modules
sys.path.append("../../")
sys.path.append("../../drivers/")
import sr760
if __name__ == "__main__":
# Load configuration
try:
configfile = sys.argv[1] # first argument is configuration file name
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
except:
print "Cannot find configuration file."
sys.exit(1)
runparams = [None, None, None, None]
if len(sys.argv) >= 2:
try:
runparams[0] = int(sys.argv[2]) # should be span
runparams[1] = int(sys.argv[3]) # should be multipier
runparams[2] = int(sys.argv[4]) # should be averaging
runparams[3] = sys.argv[5]
except (ValueError, IndexError):
pass
# Get Configuration
GPIB = config.getint('Setup', 'GPIB')
basename = config.get('Setup', 'Basename')
# Connect to device
try:
device = sr760.StanfordSR760(GPIB)
except (IOError):
print("Couldn't find things on GPIB channel %d, exiting" %(GPIB))
sys.exit(1)
# Setting up the output filename
if not runparams[3]:
name = raw_input("Output basename? (default: %s) " %basename)
if len(name) > 0:
basename = name
else:
basename = runparams[3]
outname = "%s_%s" %(basename, strftime("%y%m%d_%H%M%S"))
print "0: 191mHz\t1: 382mHz\t2:763mHz\t3:1.5Hz"
print "4: 3.1Hz\t5: 6.1Hz\t6: 12.2Hz\t7: 24.4Hz"
print "8: 48.75Hz\t9: 97.5Hz\t10: 195Hz\t11: 390Hz"
print "12: 780Hz\t13: 1.56kHz\t14: 3.125kHz\t15: 6.25kHz"
print "16: 12.5kHz\t17: 25kHz\t18: 50kHz\t19: 100kHz"
span = -1 if runparams[0] is None else runparams[0]
while span not in range(20):
try:
span = int(raw_input("Frequency span? (s = 0-19) "))
except ValueError:
pass
if span == 0:
multiplier = 0
else:
multiplier = -1 if runparams[1] is None else runparams[1]
while multiplier not in range(span+1):
try:
multiplier = int(raw_input("Multiplier? (m = 0-%d), meaning 2^m better resolution " %(span)))
except ValueError:
pass
avgnum = 0 if runparams[2] is None else runparams[2]
while avgnum < 1:
try:
avgnum = int(raw_input("Averaging number? "))
except ValueError:
pass
print "Output filename: %s" %(outname)
realspan = span - multiplier
ranges = 2 ** multiplier
device.write("SPAN %d" %(realspan))
device.write("ICPL 0") # set AC coupling
device.write("MEAS 0 1") # select PSD measurement
device.write("DISP 0 0") # display log magnitude
startfreq = 0
basefreq = device.basefreq
freqstep = basefreq / 2**(19 - realspan)
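    # Note (added): assuming device.basefreq is the analyzer's full-span width
    # (menu index 19 above), each chunk measured below covers
    # basefreq / 2**(19 - realspan) Hz, and 2**multiplier such chunks are
    # stitched together by stepping startfreq after every sweep.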
start = time()
for i in range(ranges):
device.write("STRF %f" %(startfreq))
device.write("AVGO 1")
device.write("NAVG %d" %avgnum)
device.write("AVGT 0")
device.write("AVGM 0")
device.write("OVLP 0")
sleep(0.05)
device.write("STRT")
sleep(0.05)
ready = False
while not ready:
val = int(device.ask('*STB?'))
ready = (val & 1)
sleep(0.1)
data = device.pulldata()
if i == 0:
vals = data
else:
vals = np.append(vals, data, axis=0)
print "Done %d/%d" %(i+1, ranges)
startfreq += freqstep
print "Total time: %.1fs" %(time()-start)
# Get save data
np.savetxt("%s.csv" %(outname), vals, delimiter=",")
# Data plotting
xval = vals[:, 0]
pl.figure(figsize=(11.69, 8.27)) # landscape alignment A4
yval = vals[:, 1]
pl.subplot(111)
pl.plot(xval, yval, '-')
pl.ylabel('Y')
pl.xlabel('Hz')
pl.xlim([xval[0], xval[-1]])
pl.savefig("%s.png" %(outname))
device.write("LOCL 0")
pl.show()
| mit |
jhektor/Puffin | inputfiles/HWRcrypla/calibration_main.py | 1 | 6620 | from subprocess import call #interface to command LangevinNoisePositive
import scipy.optimize as spo
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
plt.ion() #interactive plotting
fig = plt.figure()
ax1 = fig.add_subplot(111)
# ax1.set_title('001 loading')
# ax1.set_ylim([0,35])
# ax2 = fig.add_subplot(122)
# ax2.set_title('100 loading')
# ax2.set_ylim([0,35])
#Define the objective function, minimize the 2-norm or simulation-experiment
def calibfcn(x, pltstring = '--'):
error = 0
G0 = [8.5, 10.4, 5.1, 5.1, 8.5, 10.4, 5.1, 5.1, 4.3, 4.5, 4.5, 5.6, 4.3, 4.5, 4.5, 5.6, 7.4, 7.4, 15, 15, 6.6, 6.6, 6.6, 6.6, 12, 12, 12, 12, 12, 12, 12, 12]
for loadcase in [0]:
#Read experimental data from mat files
if loadcase is 0:
data_file = '/home/johan/projects/Puffin/inputfiles/calibration_crypla/data/Kariya010.mat'
eul2 = 'Materials/elasticity_tensor/euler_angle_1=0 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=0' #changes the rotation of crystal to 010 along loading in input file
G0[2] = 2#5.1
G0[3] = 2#5.1
G0[5] = 5#10.4
G0[6] = 2#5.1
G0[7] = 2#5.1
G0[15] = 3#5.6
G0[16] =4# 7.4
G0[24] = 6#12
G0[25] = 6#12
# data_file = '/home/johan/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_001.mat'
# # data_file = '/home/johan/projects/Puffin/inputfiles/calibration_crypla/data/philippi2016extra001.mat'
# eul2 = 'Materials/elasticity_tensor/euler_angle_1=0 Materials/elasticity_tensor/euler_angle_2=0 Materials/elasticity_tensor/euler_angle_3=0' #changes the rotation of crystal to 001 along loading in input file
# # eul2 = 'Materials/elasticity_tensor/euler_angle_1=60 Materials/elasticity_tensor/euler_angle_2=90' #changes the rotation of crystal to 001 along loading in input file
elif loadcase is 1:
data_file = '/home/johan/projects/Puffin/inputfiles/calibration_crypla/data/Kariya110.mat'
            eul2 = 'Materials/elasticity_tensor/euler_angle_1=0 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=45' #changes the rotation of crystal to 110 along loading in input file
            # data_file = '/home/johan/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_100.mat'
# # data_file = '/home/johan/projects/Puffin/inputfiles/calibration_crypla/data/philippi2016extra110v2.mat'
# eul2 = 'Materials/elasticity_tensor/euler_angle_1=0 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=90' #changes the rotation of crystal to 100 along loading in input file
# # eul2 = 'Materials/elasticity_tensor/euler_angle_1=0 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=45' #changes the rotation of crystal to 100 along loading in input file
# # eul2 = 'Materials/elasticity_tensor/euler_angle_1=30 Materials/elasticity_tensor/euler_angle_2=60 Materials/elasticity_tensor/euler_angle_3=75' #
data = sio.loadmat(data_file)
strain_exp = data['xx'][:,0]
stress_exp = data['yy'][:,0]#*1e-6 #in MPa
x0 = [50, 20, 0.0001, 6] # B, Q, gamma0, m
xs = x*x0
#Set up moose input file
inputfile = '1element_HWR.i '
# names of properties to calibrate
state_var_rate_B_name = 'UserObjects/state_var_evol_rate/B='
slip_res_Q_name = 'UserObjects/slip_resistance/Q='
gamma0_name = 'UserObjects/slip_rate/gamma0='
m_name = 'UserObjects/slip_rate/m='
G0_name = 'UserObjects/slip_resistance/G0='
B_str = "'" + str(xs[0]) + "'"
Q_str = str(xs[1])
gamma0_str = str(xs[2])
m_str = str(xs[3])
G0_str = str(G0)
G0_str = G0_str.replace('[',"'")
G0_str = G0_str.replace(']',"'")
G0_str = G0_str.replace(', '," ")
#Run moose simulation
print 'Load case:', loadcase
print "\033[95mCurrent material parameters:" + "\033[95m{}\033[0m".format(xs)
runcmd = 'mpirun -n 1 ../../puffin-opt -i ' + inputfile + state_var_rate_B_name + B_str + ' ' + slip_res_Q_name + Q_str + ' ' + gamma0_name + gamma0_str + ' ' + m_name + m_str + ' ' + eul2 +' ' + G0_name + G0_str + ' > mooselog.txt'
print 'Running this command:\n' + runcmd + "\033[0m"
call(runcmd, shell=True)
#Get stress strain curve from csv file
# aa = np.recfromcsv('calibrationSn.csv')
aa = np.loadtxt('hwr.csv',delimiter = ',', skiprows = 1)
# idx = (np.abs(-aa[:,-3] - 0.12)).argmin()
#idx = -1
strain_sim = -aa[1:,-2] #eps_zz
stress_sim = -aa[1:,-1] #sigma_zz in MPa (compression positive)
print error
if np.max(strain_sim) < 0.048: #this means the simulation failed ???
error += 2000
else:
#Interpolate experimental values to simulated times
stress_exp_interp = np.interp(strain_sim,strain_exp,stress_exp)
#Calculate error
# error += np.linalg.norm((stress_sim-stress_exp_interp)/stress_exp_interp)
error += np.linalg.norm((stress_exp_interp-stress_sim)/stress_sim)
if loadcase is 0:
# error = np.linalg.norm((stress_sim-stress_exp_interp)/stress_exp_interp)
pltstring = '-'
ax1.plot(strain_exp,stress_exp,'ko')
ax1.plot(strain_sim,stress_sim,pltstring)
elif loadcase is 1:
pltstring = '--'
ax1.plot(strain_exp,stress_exp,'go')
ax1.plot(strain_sim,stress_sim,pltstring)
plt.pause(0.05)
# plt.pause(5)
print "\033[91mError is: \033[00m"+"\033[91m {}\033[00m".format(error)
return error
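# Note (added): spo.minimize below optimises dimensionless multipliers; calibfcn
# rescales them internally through x0 = [50, 20, 0.0001, 6], so the physical
# parameters (B, Q, gamma0, m) are results.x * x0 rather than results.x itself.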
# Minimize the objective function
# x = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
# bounds = ((0,None),(0,None),(0,None),(0,None),(0,None))
bounds = ((0, None),(0,None), (0,None),(0,20))
# mpar = [0.001, 6, 7e-3, 8]
# mpar = 67*[1]
mpar = [1, 1, 1,1] #B, Q, gamma0, m
results = spo.minimize(calibfcn,mpar,bounds=bounds)
print mpar
if not results.success:
print results.message
else:
print "Successful optimization!, %5d, iterations" % (results.nit)
#Run simulation with the calibrated parameters
calibfcn(results.x,pltstring='-')
# calibfcn(mpar,pltstring='-')
# plt.pause()
# ax.plot(strain_exp,stress_exp,strain_sim,stress_sim,strain_sim,stress_exp_interp)
plt.show(block=True)
# calibfcn(mpar,(strain_exp, stress_exp))
| lgpl-2.1 |
devanshdalal/scikit-learn | sklearn/model_selection/_split.py | 7 | 68700 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
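        # Worked example (added): with n_samples=10 and n_splits=3, fold_sizes
        # starts as [3, 3, 3] and the first 10 % 3 = 1 fold gets one extra
        # sample, giving contiguous (optionally shuffled) test folds of sizes
        # [4, 3, 3].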
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
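        # Worked example (added): with group sizes [5, 3, 2, 2] and n_splits=2,
        # the greedy pass above assigns 5 -> fold 0, 3 -> fold 1, 2 -> fold 1,
        # 2 -> fold 0, i.e. folds of 7 and 5 samples with every group kept in
        # exactly one fold.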
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_splits)``, the last one has
the complementary.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
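        # Worked example (added): with n_samples=6 and n_splits=3, n_folds=4,
        # test_size = 6 // 4 = 1 and test_starts = (3, 4, 5), so the training
        # window grows as [0:3], [0:4], [0:5] while each test set is the single
        # next index.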
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
    ``p`` different values of the groups while the latter uses the samples
    all assigned to a single group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class _RepeatedSplits(with_metaclass(ABCMeta)):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, (np.integer, numbers.Integral)):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 1:
raise ValueError("Number of repetitions must be greater than 1.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
See also
--------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedKFold, self).__init__(
KFold, n_repeats, random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedStratifiedKFold, self).__init__(
StratifiedKFold, n_repeats, random_state, n_splits=n_splits)
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
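# Minimal usage sketch for GroupShuffleSplit (the arrays are illustrative):
# splits are drawn over the unique groups, so every sample sharing a group
# ends up on the same side of the split.
#
#     X = np.ones((8, 2))
#     groups = np.array([1, 1, 2, 2, 3, 3, 4, 4])
#     gss = GroupShuffleSplit(n_splits=3, test_size=0.25, random_state=0)
#     for train_idx, test_idx in gss.split(X, groups=groups):
#         assert set(groups[train_idx]).isdisjoint(groups[test_idx])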
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws samples
    from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
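            # n_i / t_i hold the per-class number of train / test samples to
            # draw for this split; each array sums to n_train / n_test.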
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE This does not take into account the number of samples, which is known
    only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
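# Worked example for _validate_shuffle_split (illustrative numbers): with
# n_samples=10, test_size=0.2 and train_size=None, n_test = ceil(0.2 * 10) = 2
# and n_train = 10 - 2 = 8, so the helper returns (8, 2). Integer sizes are
# instead interpreted as absolute sample counts.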
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
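# Sketch of how check_cv resolves its input (illustrative target values):
#
#     y_clf = np.array([0, 1, 0, 1, 0, 1])
#     check_cv(3, y_clf, classifier=True)   # -> StratifiedKFold(n_splits=3)
#     check_cv(3, y_clf, classifier=False)  # -> KFold(n_splits=3)
#     check_cv([(np.array([0, 1]), np.array([2]))])  # -> _CVIterableWrapper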
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
oneliner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
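# Stratified usage sketch for train_test_split (hypothetical data): passing
# ``stratify=y`` routes the split through StratifiedShuffleSplit, preserving
# the class proportions in both halves.
#
#     X = np.arange(20).reshape(10, 2)
#     y = np.array([0] * 5 + [1] * 5)
#     X_tr, X_te, y_tr, y_te = train_test_split(
#         X, y, test_size=0.4, stratify=y, random_state=0)
#     # y_te holds two samples of each class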
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
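# Note: _build_repr backs the __repr__ of the splitters above (e.g.
# BaseShuffleSplit), producing strings such as
# ``ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)``
# as shown in the doctests earlier in this file.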
| bsd-3-clause |
keflavich/scikit-image | skimage/io/tests/test_mpl_imshow.py | 12 | 2852 | from __future__ import division
import numpy as np
from skimage import io
from skimage._shared._warnings import expected_warnings
import matplotlib.pyplot as plt
def setup():
io.reset_plugins()
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
im64 = im8.astype(np.uint64)
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
plt.figure()
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_uint16():
plt.figure()
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_float():
plt.figure()
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_low_dynamic_range():
with expected_warnings(["Low image dynamic range"]):
ax_im = io.imshow(im_lo)
assert ax_im.get_clim() == (im_lo.min(), im_lo.max())
# check that a colorbar was created
assert ax_im.colorbar is not None
def test_outside_standard_range():
plt.figure()
with expected_warnings(["out of standard range"]):
ax_im = io.imshow(im_hi)
assert ax_im.get_clim() == (im_hi.min(), im_hi.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_nonstandard_type():
plt.figure()
with expected_warnings(["Non-standard image type",
"Low image dynamic range"]):
ax_im = io.imshow(im64)
assert ax_im.get_clim() == (im64.min(), im64.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_signed_image():
plt.figure()
im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]])
ax_im = io.imshow(im_signed)
assert ax_im.get_clim() == (-0.5, 0.5)
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
if __name__ == '__main__':
np.testing.run_module_suite()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/indexes/datetimes/test_misc.py | 2 | 15440 | import calendar
from datetime import datetime
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import DatetimeIndex, Index, Timedelta, Timestamp, date_range, offsets
import pandas._testing as tm
class TestTimeSeries:
def test_range_edges(self):
# GH#13672
idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000000001"),
end=Timestamp("1970-01-01 00:00:00.000000004"),
freq="N",
)
exp = DatetimeIndex(
[
"1970-01-01 00:00:00.000000001",
"1970-01-01 00:00:00.000000002",
"1970-01-01 00:00:00.000000003",
"1970-01-01 00:00:00.000000004",
],
freq="N",
)
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000000004"),
end=Timestamp("1970-01-01 00:00:00.000000001"),
freq="N",
)
exp = DatetimeIndex([], freq="N")
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000000001"),
end=Timestamp("1970-01-01 00:00:00.000000001"),
freq="N",
)
exp = DatetimeIndex(["1970-01-01 00:00:00.000000001"], freq="N")
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 00:00:00.000001"),
end=Timestamp("1970-01-01 00:00:00.000004"),
freq="U",
)
exp = DatetimeIndex(
[
"1970-01-01 00:00:00.000001",
"1970-01-01 00:00:00.000002",
"1970-01-01 00:00:00.000003",
"1970-01-01 00:00:00.000004",
],
freq="U",
)
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 00:00:00.001"),
end=Timestamp("1970-01-01 00:00:00.004"),
freq="L",
)
exp = DatetimeIndex(
[
"1970-01-01 00:00:00.001",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.003",
"1970-01-01 00:00:00.004",
],
freq="L",
)
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 00:00:01"),
end=Timestamp("1970-01-01 00:00:04"),
freq="S",
)
exp = DatetimeIndex(
[
"1970-01-01 00:00:01",
"1970-01-01 00:00:02",
"1970-01-01 00:00:03",
"1970-01-01 00:00:04",
],
freq="S",
)
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 00:01"),
end=Timestamp("1970-01-01 00:04"),
freq="T",
)
exp = DatetimeIndex(
[
"1970-01-01 00:01",
"1970-01-01 00:02",
"1970-01-01 00:03",
"1970-01-01 00:04",
],
freq="T",
)
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01 01:00"),
end=Timestamp("1970-01-01 04:00"),
freq="H",
)
exp = DatetimeIndex(
[
"1970-01-01 01:00",
"1970-01-01 02:00",
"1970-01-01 03:00",
"1970-01-01 04:00",
],
freq="H",
)
tm.assert_index_equal(idx, exp)
idx = date_range(
start=Timestamp("1970-01-01"), end=Timestamp("1970-01-04"), freq="D"
)
exp = DatetimeIndex(
["1970-01-01", "1970-01-02", "1970-01-03", "1970-01-04"], freq="D"
)
tm.assert_index_equal(idx, exp)
class TestDatetime64:
def test_datetimeindex_accessors(self):
dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
# GH#13303
dti_tz = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern"
)
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week[0] == 1
assert dti.isocalendar().week[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeIndex._field_ops:
if accessor in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
continue
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeIndex._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name")
tm.assert_index_equal(res, exp)
dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert dti.is_month_start[0] == 1
tests = [
(Timestamp("2013-06-01", freq="M").is_month_start, 1),
(Timestamp("2013-06-01", freq="BM").is_month_start, 0),
(Timestamp("2013-06-03", freq="M").is_month_start, 0),
(Timestamp("2013-06-03", freq="BM").is_month_start, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_month_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_quarter_end, 1),
(Timestamp("2013-02-28", freq="Q-FEB").is_year_end, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_month_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_quarter_start, 1),
(Timestamp("2013-03-01", freq="Q-FEB").is_year_start, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_month_end, 1),
(Timestamp("2013-03-31", freq="QS-FEB").is_quarter_end, 0),
(Timestamp("2013-03-31", freq="QS-FEB").is_year_end, 0),
(Timestamp("2013-02-01", freq="QS-FEB").is_month_start, 1),
(Timestamp("2013-02-01", freq="QS-FEB").is_quarter_start, 1),
(Timestamp("2013-02-01", freq="QS-FEB").is_year_start, 1),
(Timestamp("2013-06-30", freq="BQ").is_month_end, 0),
(Timestamp("2013-06-30", freq="BQ").is_quarter_end, 0),
(Timestamp("2013-06-30", freq="BQ").is_year_end, 0),
(Timestamp("2013-06-28", freq="BQ").is_month_end, 1),
(Timestamp("2013-06-28", freq="BQ").is_quarter_end, 1),
(Timestamp("2013-06-28", freq="BQ").is_year_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_month_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_quarter_end, 0),
(Timestamp("2013-06-30", freq="BQS-APR").is_year_end, 0),
(Timestamp("2013-06-28", freq="BQS-APR").is_month_end, 1),
(Timestamp("2013-06-28", freq="BQS-APR").is_quarter_end, 1),
(Timestamp("2013-03-29", freq="BQS-APR").is_year_end, 1),
(Timestamp("2013-11-01", freq="AS-NOV").is_year_start, 1),
(Timestamp("2013-10-31", freq="AS-NOV").is_year_end, 1),
(Timestamp("2012-02-01").days_in_month, 29),
(Timestamp("2013-02-01").days_in_month, 28),
]
for ts, value in tests:
assert ts == value
# GH 6538: Check that DatetimeIndex and its TimeStamp elements
# return the same weekofyear accessor close to new year w/ tz
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
expected = [52, 1, 1]
assert dates.isocalendar().week.tolist() == expected
assert [d.weekofyear for d in dates] == expected
# GH 12806
@pytest.mark.parametrize(
"time_locale", [None] if tm.get_locales() is None else [None] + tm.get_locales()
)
def test_datetime_name_accessors(self, time_locale):
# Test Monday -> Sunday and January -> December, in that sequence
if time_locale is None:
# If the time_locale is None, day-name and month_name should
# return the english attributes
expected_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
expected_months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_days = calendar.day_name[:]
expected_months = calendar.month_name[1:]
# GH#11128
dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
english_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
name = name.capitalize()
assert dti.day_name(locale=time_locale)[day] == name
ts = Timestamp(datetime(2016, 4, day))
assert ts.day_name(locale=time_locale) == name
dti = dti.append(DatetimeIndex([pd.NaT]))
assert np.isnan(dti.day_name(locale=time_locale)[-1])
ts = Timestamp(pd.NaT)
assert np.isnan(ts.day_name(locale=time_locale))
# GH#12805
dti = date_range(freq="M", start="2012", end="2013")
result = dti.month_name(locale=time_locale)
expected = Index([month.capitalize() for month in expected_months])
# work around different normalization schemes
# https://github.com/pandas-dev/pandas/issues/22342
result = result.str.normalize("NFD")
expected = expected.str.normalize("NFD")
tm.assert_index_equal(result, expected)
for date, expected in zip(dti, expected_months):
result = date.month_name(locale=time_locale)
expected = expected.capitalize()
result = unicodedata.normalize("NFD", result)
expected = unicodedata.normalize("NFD", result)
assert result == expected
dti = dti.append(DatetimeIndex([pd.NaT]))
assert np.isnan(dti.month_name(locale=time_locale)[-1])
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
tm.assert_index_equal(dti.nanosecond, Index(np.arange(10, dtype=np.int64)))
def test_iter_readonly():
# GH#28055 ints_to_pydatetime with readonly array
arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")])
arr.setflags(write=False)
dti = pd.to_datetime(arr)
list(dti)
def test_week_and_weekofyear_are_deprecated():
# GH#33595 Deprecate week and weekofyear
idx = date_range(start="2019-12-29", freq="D", periods=4)
with tm.assert_produces_warning(FutureWarning):
idx.week
with tm.assert_produces_warning(FutureWarning):
idx.weekofyear
def test_isocalendar_returns_correct_values_close_to_new_year_with_tz():
# GH 6538: Check that DatetimeIndex and its TimeStamp elements
# return the same weekofyear accessor close to new year w/ tz
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
result = dates.isocalendar()
expected_data_frame = pd.DataFrame(
[[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]],
columns=["year", "week", "day"],
index=dates,
dtype="UInt32",
)
tm.assert_frame_equal(result, expected_data_frame)
def test_add_timedelta_preserves_freq():
# GH#37295 should hold for any DTI with freq=None or Tick freq
tz = "Canada/Eastern"
dti = date_range(
start=Timestamp("2019-03-26 00:00:00-0400", tz=tz),
end=Timestamp("2020-10-17 00:00:00-0400", tz=tz),
freq="D",
)
result = dti + Timedelta(days=1)
assert result.freq == dti.freq
| bsd-3-clause |
juanka1331/VAN-applied-to-Nifti-images | scripts/print_means_images_per_group.py | 1 | 1406 | from lib.data_loader import MRI_stack_NORAD
from lib import regenerate_utils as utils
from matplotlib import pyplot as plt
import matplotlib
matplotlib.get_backend()
plt.interactive(False)
dict_norad = MRI_stack_NORAD.get_gm_stack()
# Truncate values over 1 to 1, and under 0 to 0
dict_norad['stack'][dict_norad['stack'] < 0 ] = 0
dict_norad['stack'][dict_norad['stack'] > 1 ] = 1
media_imagen_false = utils.get_mean_over_samples_images(dict_norad, 0)
media_imagen_pos = utils.get_mean_over_samples_images(dict_norad, 1)
media_3d_false = utils.reconstruct_3d_image(media_imagen_false,
dict_norad['voxel_index'], dict_norad['imgsize'])
media_3d_pos = \
utils.reconstruct_3d_image(media_imagen_pos, dict_norad['voxel_index'], dict_norad['imgsize'])
img_index = 40
sample_pos = utils.reconstruct_3d_image(dict_norad['stack'][-img_index,:], dict_norad['voxel_index'], dict_norad['imgsize'])
sample_neg = utils.reconstruct_3d_image(dict_norad['stack'][img_index,:], dict_norad['voxel_index'], dict_norad['imgsize'])
index = 77
plt.figure(1)
plt.imshow(media_3d_false[:,index,:], cmap="Greys")
plt.show(block=False)
plt.figure(2)
plt.imshow(media_3d_pos[:,index,:], cmap="Greys")
plt.show(block=False)
plt.figure(3)
plt.imshow(sample_pos[:,index,:], cmap="Greys")
plt.show(block=False)
plt.figure(4)
plt.imshow(sample_neg[:,index,:], cmap="Greys")
plt.show() | gpl-2.0 |
pminervini/ebemkg | dimensionality/cluster.py | 2 | 2276 | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import numpy
import tsne
import sklearn
import sklearn.cluster
import sklearn.datasets
import logging
class ClusteringMethod(object):
__metaclass__ = ABCMeta
@abstractmethod
def apply(self, X): pass
class NoClustering(ClusteringMethod):
def __init__(self, n_clusters=1):
self.n_clusters = n_clusters
def apply(self, X):
return numpy.zeros(shape=(X.shape[0]))
class KMeans(ClusteringMethod):
def __init__(self, n_clusters=8):
self.kmeans = sklearn.cluster.KMeans(n_clusters=n_clusters)
def apply(self, X):
self.kmeans.fit(X)
return self.kmeans.predict(X)
class AffinityPropagation(ClusteringMethod):
def __init__(self, damping=0.5):
self.affinity_propagation = sklearn.cluster.AffinityPropagation(damping=damping)
def apply(self, X):
self.affinity_propagation.fit(X)
return self.affinity_propagation.predict(X)
class MeanShift(ClusteringMethod):
def __init__(self, bandwidth=None):
self.mean_shift = sklearn.cluster.MeanShift(bandwidth=bandwidth)
def apply(self, X):
self.mean_shift.fit(X)
return self.mean_shift.predict(X)
class SpectralClustering(ClusteringMethod):
def __init__(self, n_clusters=8):
self.spectral_clustering = sklearn.cluster.SpectralClustering(n_clusters=n_clusters)
    def apply(self, X):
        # fit_predict fits the model and returns the cluster labels in one call
        return self.spectral_clustering.fit_predict(X)
class AgglomerativeClustering(ClusteringMethod):
def __init__(self, n_clusters=2):
self.agglomerative_clustering = sklearn.cluster.AgglomerativeClustering(n_clusters=n_clusters)
    def apply(self, X):
        # AgglomerativeClustering has no predict(); fit_predict returns the labels
        return self.agglomerative_clustering.fit_predict(X)
class DBSCAN(ClusteringMethod):
def __init__(self, eps=0.5):
self.dbscan = sklearn.cluster.DBSCAN(eps=eps)
    def apply(self, X):
        # DBSCAN has no predict(); fit_predict returns the cluster labels
        return self.dbscan.fit_predict(X)
class GMM(ClusteringMethod):
def __init__(self, n_components=1):
        # GMM lives in sklearn.mixture, not sklearn.cluster
        self.gmm = sklearn.mixture.GMM(n_components=n_components)
def apply(self, X):
self.gmm.fit(X)
return self.gmm.predict(X)
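# Minimal usage sketch (hypothetical data): every wrapper above exposes the
# same ``apply(X) -> labels`` interface, so the methods are interchangeable.
if __name__ == '__main__':
    X_demo, _ = sklearn.datasets.make_blobs(n_samples=150, centers=3,
                                            random_state=0)
    for method in (NoClustering(), KMeans(n_clusters=3)):
        labels = method.apply(X_demo)
        logging.info('%s assigned %d cluster label(s)',
                     method.__class__.__name__, len(numpy.unique(labels)))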
| gpl-2.0 |
nvoron23/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 68 | 23597 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
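# Background for the check above (worked identity): ridge regression admits
# the dual form  w = X.T @ (X @ X.T + alpha * I)^-1 @ y,  which equals the
# primal solution  w = (X.T @ X + alpha * I)^-1 @ X.T @ y;  coef2 rebuilds the
# primal weights from the kernel (dual) coefficients via that identity.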
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
    # Test the Ridge estimator on a trivial toy regression problem
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # Test that ridge GCV works with both dense and sparse input matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
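    # Background note (standard ridge LOO identity, added for clarity): with
    # hat matrix H = X (X^T X + alpha*I)^-1 X^T, the leave-one-out residual
    # for sample i is (y_i - y_hat_i) / (1 - H_ii), which is why no model
    # needs to be refit per left-out sample.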
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
    # now the hyperplane should rotate clockwise and
    # the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
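        # (i.e. the effective weight of sample i is
        # sample_weight[i] * class_weight[y[i]], so sample_weight ** 2 with
        # no class_weight should match sample_weight combined with
        # class_weight above)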
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
    # Tests whether a ValueError is raised if an unrecognized solver
    # is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
| bsd-3-clause |
muxiaobai/CourseExercises | python/kaggle/pandas/demo.py | 1 | 1347 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#https://www.kaggle.com/learn/pandas
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas import Series,DataFrame
import seaborn as sns
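# NOTE: `input_data` below is assumed to be a DataFrame loaded beforehand,
# e.g. from the Kaggle wine-reviews CSV used in the course (the file name
# here is only a placeholder):
# input_data = pd.read_csv("winemag-data.csv", index_col=0)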
# Commonly used operations
# Basic information
input_data.columns
input_data["country"].unique()
input_data["points"].describe()
input_data.head()
input_data.info()
input_data["country"].value_counts()
# Data types
input_data.points.dtype
input_data.points.astype("int64")
# Check for missing values
input_data.isnull().sum()
input_data.isnull().sum().sort_values(ascending=False).head(10)
#df.fillna(value=0)
mean = input_data["price"].mean()
input_data["price_nonull"] = input_data["price"].fillna(mean)
print ("price",input_data["price"].isnull().sum())
print ("price_nonull",input_data["price_nonull"].isnull().sum())
# Check for duplicates
# Duplicated rows are marked True, unique rows False
input_data.duplicated(subset=["description","points","price","country","province","variety"], keep='first').value_counts()
input_data.duplicated().value_counts()
out_data = input_data.drop_duplicates(subset=["description","points","price","country","province","variety"],keep='last',inplace=False)
# Row selection
input_data.loc[input_data.country.isin(["Spain","France"])]
input_data.loc[input_data.country.isnull()|input_data.province.isnull()]
input_data.loc[(input_data["country"]=="US")|(input_data.points>95)] | gpl-2.0 |
ellisztamas/faps | tests/test_simulate_mating.py | 1 | 2331 | from faps.sibshipCluster import sibshipCluster
import numpy as np
import pandas as pd
import faps as fp
ndraws=1000
np.random.seed(867)
def test_method():
# Simulate a starting population
allele_freqs = np.random.uniform(0.3,0.5,50)
adults = fp.make_parents(100, allele_freqs, family_name='a')
progeny = fp.make_sibships(adults, 0, [1,2,3], 5, 'x')
mothers = adults.subset(progeny.mothers)
patlik = fp.paternity_array(progeny, mothers, adults, mu = 0.0015, missing_parents=0.01)
sc = fp.sibship_clustering(patlik)
# Check posterior_mating returns what it should in ideal case
me = sc.posterior_mating()
assert isinstance(me, pd.DataFrame)
assert me['posterior_probability'].sum() == 1.0
assert all([x in sc.candidates for x in me['father']])
# Remove one of the fathers and check that a missing dad is sampled.
patlik.purge = "a_1"
sc2 = fp.sibship_clustering(patlik)
me2 = sc2.posterior_mating()
assert isinstance(me2, pd.DataFrame)
assert me2['father'].isin(["missing"]).any()
# Include a nonsense covariate
cov = np.arange(0,adults.size)
cov = -cov/cov.sum()
patlik.add_covariate(cov)
sc3 = fp.sibship_clustering(patlik, use_covariates=True)
me3 = sc3.posterior_mating(use_covariates=True)
assert isinstance(me3, pd.DataFrame)
assert me3['posterior_probability'].sum() == 1.0
    # Check that covariates-only predictions also behave sensibly.
sc4 = fp.sibship_clustering(patlik, use_covariates=True)
me4 = sc4.posterior_mating(use_covariates=True, covariates_only=True)
    assert isinstance(me4, pd.DataFrame)
    assert me4['posterior_probability'].sum() == 1.0
# Generate a population of adults
allele_freqs = np.random.uniform(0.3,0.5,50)
adults = fp.make_parents(20, allele_freqs)
# Example with multiple half-sib families
progeny = fp.make_offspring(parents = adults, dam_list=[7,7,7,7,7,1,8,8,0], sire_list=[2,4,4,4,4,6,3,0,7])
# A single genotypeArray giving the mother of each of 984 offspring individuals.
mothers = adults.subset(individuals=progeny.mothers)
# Create the paternity array and save for later.
patlik = fp.paternity_array(progeny, mothers, adults, mu = 0.0013 )
patlik = patlik.split(by=progeny.mothers)
# The dictionary is passed to sibship_clustering.
sibships = fp.sibship_clustering(patlik)
me = fp.posterior_mating(sibships)
me
| mit |
fabianp/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
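        # A rough sketch of such a heuristic (illustrative only, not used
        # here) might map numpy dtypes to array typecodes, e.g.
        #     typecode = {np.dtype(np.float64): 'd',
        #                 np.dtype(np.intc): 'i'}.get(np.dtype(dtype), 'd')
        # but any fixed mapping would stay incomplete for arbitrary dtypes.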
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
LedaLima/incubator-spot | spot-setup/migration/migrate_old_dns_data.py | 7 | 11399 | #!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
import fnmatch
import re
import pandas as pd
import datetime
from utilities import util
old_oa_path=sys.argv[1]
staging_db=sys.argv[2]
hdfs_staging_path=sys.argv[3]
dest_db = sys.argv[4]
impala_daemon = sys.argv[5]
# Execution example:
#./migrate_old_dns_data.py '/home/spotuser/incubator-spot_old/spot-oa' 'spot_migration' '/user/spotuser/spot_migration/' 'migrated' 'node01'
def main():
log = util.get_logger('SPOT.MIGRATE.DNS')
cur_path = os.path.dirname(os.path.realpath(__file__))
new_spot_path = os.path.split(os.path.split(cur_path)[0])[0]
new_oa_path = '{0}/spot-oa'.format(new_spot_path)
log.info('New Spot OA path: {0}'.format(new_oa_path))
old_spot_path = os.path.split(old_oa_path)[0]
log.info("Creating HDFS paths for Impala tables")
util.create_hdfs_folder('{0}/dns/scores'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/dendro'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/edge'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/summary'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/storyboard'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/threat_dendro'.format(hdfs_staging_path),log)
util.execute_cmd('hdfs dfs -setfacl -R -m user:impala:rwx {0}'.format(hdfs_staging_path),log)
log.info("Creating Staging tables in Impala")
util.execute_cmd('impala-shell -i {0} --var=hpath={1} --var=dbname={2} -c -f create_dns_migration_tables.hql'.format(impala_daemon, hdfs_staging_path, staging_db),log)
## dns Ingest Summary
log.info('Processing Dns Ingest Summary')
ing_sum_path='{0}/data/dns/ingest_summary/'.format(old_oa_path)
pattern='is_??????.csv'
staging_table_name = 'dns_ingest_summary_tmp'
dest_table_name = 'dns_ingest_summary'
if os.path.exists(ing_sum_path):
for file in fnmatch.filter(os.listdir(ing_sum_path), pattern):
log.info('Processing file: {0}'.format(file))
filepath='{0}{1}'.format(ing_sum_path, file)
df = pd.read_csv(filepath)
s = df.iloc[:,0]
l_dates = list(s.unique())
l_dates = map(lambda x: x[0:10].strip(), l_dates)
l_dates = filter(lambda x: re.match('\d{4}[-/]\d{2}[-/]\d{1}', x), l_dates)
s_dates = set(l_dates)
for date_str in s_dates:
dt = datetime.datetime.strptime(date_str, '%Y-%m-%d')
log.info('Processing day: {0} {1} {2} {3}'.format(date_str, dt.year, dt.month, dt.day))
records = df[df['date'].str.contains(date_str)]
filename = "ingest_summary_{0}{1}{2}.csv".format(dt.year, dt.month, dt.day)
records.to_csv(filename, index=False)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tdate, total FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
os.remove(filename)
## Iterating days
days_path='{0}/data/dns/'.format(old_oa_path)
if os.path.exists(days_path):
for day_folder in fnmatch.filter(os.listdir(days_path), '2*'):
print day_folder
dt = datetime.datetime.strptime(day_folder, '%Y%m%d')
log.info('Processing day: {0} {1} {2} {3}'.format(day_folder, dt.year, dt.month, dt.day))
full_day_path = '{0}{1}'.format(days_path,day_folder)
## dns Scores and dns Threat Investigation
filename = '{0}/dns_scores.csv'.format(full_day_path)
if os.path.isfile(filename):
log.info("Processing Dns Scores")
staging_table_name = 'dns_scores_tmp'
dest_table_name = 'dns_scores'
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT frame_time, unix_tstamp, frame_len, ip_dst, dns_qry_name, dns_qry_class, dns_qry_type, dns_qry_rcode, ml_score, tld, query_rep, hh, dns_qry_class_name, dns_qry_type_name, dns_qry_rcode_name, network_context FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
log.info("Processing dns Threat Investigation")
staging_table_name = 'dns_scores_tmp'
dest_table_name = 'dns_threat_investigation'
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT unix_tstamp, ip_dst, dns_qry_name, ip_sev, dns_sev FROM {5}.{6} WHERE ip_sev > 0 or dns_sev > 0;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
# dns Dendro
log.info("Processing Dns Dendro")
staging_table_name = 'dns_dendro_tmp'
dest_table_name = 'dns_dendro'
pattern = 'dendro*.csv'
dendro_files = fnmatch.filter(os.listdir(full_day_path), pattern)
filename = '{0}/{1}'.format(full_day_path, pattern)
if len(dendro_files) > 0:
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT unix_timestamp('{5}', 'yyyyMMMdd'), dns_a, dns_qry_name, ip_dst FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, day_folder, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
## dns Edge
log.info("Processing Dns Edge")
staging_table_name = 'dns_edge_tmp'
dest_table_name = 'dns_edge'
pattern = 'edge*.csv'
edge_files = fnmatch.filter(os.listdir(full_day_path), pattern)
for file in edge_files:
parts = (re.findall("edge-(\S+).csv", file)[0]).split('_')
hh = int(parts[-2])
mn = int(parts[-1])
log.info("Processing File: {0} with HH: {1} and MN: {2}".format(file, hh, mn))
log.info("Removing double quotes File: {0}".format(file))
fixed_file = '{0}.fixed'.format(file)
sed_cmd = "sed 's/\"//g' {0}/{1} > {0}/{2}".format(full_day_path, file, fixed_file)
util.execute_cmd(sed_cmd, log)
filename = '{0}/{1}'.format(full_day_path, fixed_file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT unix_timestamp(frame_time, 'MMMMM dd yyyy H:mm:ss.SSS z'), frame_len, ip_dst, ip_src, dns_qry_name, '', '0', '0', dns_a, {5}, dns_qry_class, dns_qry_type, dns_qry_rcode, '0' FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, hh, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
os.remove(filename)
##dns_storyboard
log.info("Processing Dns Storyboard")
staging_table_name = 'dns_storyboard_tmp'
dest_table_name = 'dns_storyboard'
filename = '{0}/threats.csv'.format(full_day_path)
if os.path.isfile(filename):
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT ip_threat, dns_threat, title, text FROM {5}.{6};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
# dns Threat Dendro
log.info("Processing Dns Threat Dendro")
staging_table_name = 'dns_threat_dendro_tmp'
dest_table_name = 'dns_threat_dendro'
pattern = 'threat-dendro*.csv'
threat_dendro_files = fnmatch.filter(os.listdir(full_day_path), pattern)
filename = '{0}/{1}'.format(full_day_path, pattern)
for file in threat_dendro_files:
ip = re.findall("threat-dendro-(\S+).csv", file)[0]
log.info("Processing File: {0} with IP:{1}".format(file, ip))
filename = '{0}/{1}'.format(full_day_path, file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT '{5}', total, dns_qry_name, ip_dst FROM {6}.{7} WHERE dns_qry_name is not null;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, ip, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
log.info("Dropping staging tables")
util.execute_cmd('impala-shell -i {0} --var=dbname={1} -c -f drop_dns_migration_tables.hql'.format(impala_daemon, staging_db),log)
log.info("Removing staging tables' path in HDFS")
util.execute_cmd('hadoop fs -rm -r {0}/dns/'.format(hdfs_staging_path),log)
log.info("Moving CSV data to backup folder")
util.execute_cmd('mkdir {0}/data/backup/'.format(old_oa_path),log)
util.execute_cmd('cp -r {0}/data/dns/ {0}/data/backup/'.format(old_oa_path),log)
util.execute_cmd('rm -r {0}/data/dns/'.format(old_oa_path),log)
log.info("Invalidating metadata in Impala to refresh tables content")
util.execute_cmd('impala-shell -i {0} -q "INVALIDATE METADATA;"'.format(impala_daemon),log)
log.info("Creating ipynb template structure and copying advanced mode and threat investigation ipynb templates for each pre-existing day in the new Spot location")
ipynb_pipeline_path = '{0}/ipynb/dns/'.format(old_oa_path)
if os.path.exists(ipynb_pipeline_path):
for folder in os.listdir(ipynb_pipeline_path):
log.info("Creating ipynb dns folders in new Spot locaiton: {0}".format(folder))
util.execute_cmd('mkdir -p {0}/ipynb/dns/{1}/'.format(new_oa_path, folder),log)
log.info("Copying advanced mode ipynb template")
util.execute_cmd('cp {0}/oa/dns/ipynb_templates/Advanced_Mode_master.ipynb {0}/ipynb/dns/{1}/Advanced_Mode.ipynb'.format(new_oa_path, folder),log)
log.info("Copying threat investigation ipynb template")
util.execute_cmd('cp {0}/oa/dns/ipynb_templates/Threat_Investigation_master.ipynb {0}/ipynb/dns/{1}/Threat_Investigation.ipynb'.format(new_oa_path, folder),log)
if __name__=='__main__':
main()
| apache-2.0 |
jorge2703/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score reflects the degree of confidence in
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
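# A minimal sketch (not part of the original example) of the selection
# mentioned in the docstring: keeping only samples whose decision score
# exceeds some threshold yields a subset enriched in class B. The value
# 0.5 is an arbitrary choice made purely for illustration.
score_threshold = 0.5
enriched_mask = twoclass_output > score_threshold
X_enriched_B, y_enriched_B = X[enriched_mask], y[enriched_mask]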
| bsd-3-clause |
has2k1/mizani | mizani/tests/test_utils.py | 1 | 4277 | import numpy as np
import pandas as pd
import pytest
from mizani.utils import (round_any, min_max, match, precision,
first_element, multitype_sort,
same_log10_order_of_magnitude, get_categories)
def test_round_any():
x = 4.632
assert round_any(x, 1) == 5
assert round_any(x, 2) == 4
assert round_any(x, 3) == 6
assert round_any(x, 4) == 4
assert round_any(x, 5) == 5
assert round_any(x, 1.5) == 4.5
# Maintains the same index
s = pd.Series([1.1, 2.2, 3.3], index=[3, 2, 1])
result = round_any(s, 2)
assert s.index.equals(result.index)
def test_min_max():
x = [1, 2, 3, 4, 5]
_min, _max = min_max(x)
assert _min == 1
assert _max == 5
x = [1, float('-inf'), 3, 4, 5]
_min, _max = min_max(x)
assert _min == 1
assert _max == 5
_min, _max = min_max(x, finite=False)
assert _min == float('-inf')
assert _max == 5
x = [1, 2, float('nan'), 4, 5]
_min, _max = min_max(x, na_rm=True)
assert _min == 1
assert _max == 5
x = [1, 2, float('nan'), 4, 5, float('inf')]
_min, _max = min_max(x, na_rm=True, finite=False)
assert _min == 1
assert _max == float('inf')
_min, _max = min_max(x)
assert str(_min) == 'nan'
assert str(_max) == 'nan'
x = [float('nan'), float('nan'), float('nan')]
_min, _max = min_max(x, na_rm=True)
assert _min == float('-inf')
assert _max == float('inf')
def test_match():
v1 = [0, 1, 2, 3, 4, 5]
v2 = [5, 4, 3, 2, 1, 0]
result = match(v1, v2)
assert result == v2
# Positions of the first match
result = match(v1, v2+v2)
assert result == v2
result = match(v1, v2, incomparables=[1, 2])
assert result == [5, -1, -1, 2, 1, 0]
result = match(v1, v2, start=1)
assert result == [6, 5, 4, 3, 2, 1]
v2 = [5, 99, 3, 2, 1, 0]
result = match(v1, v2)
assert result == [5, 4, 3, 2, -1, 0]
def test_precision():
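    # Judging from the assertions below, precision() returns a power of ten
    # matching the order of magnitude of its input, i.e. a sensible rounding
    # unit for that range of values.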
assert precision(0.0037) == .001
assert precision(0.5) == .1
assert precision(9) == 1
assert precision(24) == 10
assert precision(784) == 100
assert precision([0, 0]) == 1
def test_first_element():
x = [3, 4, 5]
s = pd.Series(x)
a = np.array([3, 4, 5])
assert first_element(x) == 3
assert first_element(s) == 3
assert first_element(s[1:]) == 4
assert first_element(a) == 3
assert first_element(a[1:]) == 4
with pytest.raises(StopIteration):
first_element([])
with pytest.raises(RuntimeError):
first_element(iter(x))
def test_multitype_sort():
a = ['c', float('nan'), 1, 'b', 'a', 2.0, 0]
result = multitype_sort(a)
    # Any consecutive elements of the same type are sorted
for i, x in enumerate(result[1:], start=1):
x_prev = result[i-1]
if (type(x_prev) is type(x)):
# cannot compare nan with anything
if (isinstance(x, (float, np.float)) and
(np.isnan(x_prev) or np.isnan(x))):
continue
assert x_prev <= x
def test_same_log10_order_of_magnitude():
# Default delta
assert same_log10_order_of_magnitude((2, 8))
assert same_log10_order_of_magnitude((35, 80.8))
assert same_log10_order_of_magnitude((232.3, 730))
assert not same_log10_order_of_magnitude((1, 18))
assert not same_log10_order_of_magnitude((35, 800))
assert not same_log10_order_of_magnitude((32, 730))
assert not same_log10_order_of_magnitude((1, 9.9))
assert not same_log10_order_of_magnitude((35, 91))
assert not same_log10_order_of_magnitude((232.3, 950))
# delta = 0
assert same_log10_order_of_magnitude((1, 9.9), delta=0)
assert same_log10_order_of_magnitude((35, 91), delta=0)
assert same_log10_order_of_magnitude((232.3, 950), delta=0)
def test_get_categories():
lst = list('abcd')
s = pd.Series(lst)
c = pd.Categorical(lst)
sc = pd.Series(c)
categories = pd.Index(lst)
assert categories.equals(get_categories(c))
assert categories.equals(get_categories(sc))
with pytest.raises(TypeError):
assert categories.equals(get_categories(lst))
with pytest.raises(TypeError):
assert categories.equals(get_categories(s))
| bsd-3-clause |
NovaSyst/chocolate | doc/conf.py | 1 | 6855 | # -*- coding: utf-8 -*-
#
# Chocolate documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 30 13:21:43 2016.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append("..")
#sys.path.append(os.path.abspath('_ext/'))
import chocolate
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosectionlabel',
'sphinx.ext.autosummary', 'sphinx.ext.doctest',
'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.intersphinx',
'sphinx.ext.extlinks', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon']
try:
import matplotlib
except:
pass
else:
extensions += ['matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Chocolate'
copyright = u'2016-%s, NovaSyst' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = chocolate.__version__
# The full version, including alpha/beta/rc tags.
release = chocolate.__revision__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, the todo will be printed in the documentation
todo_include_todos = True
# Search in python documentation
intersphinx_mapping = {'python' : ('https://docs.python.org/3', None),
'numpy' : ('http://docs.scipy.org/doc/numpy', None),
'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None)}
# Reload the cached values every 5 days
intersphinx_cache_limit = 5
# -- Options for pyplot extension ----------------------------------------------
# Default value for the include-source option
plot_include_source = False
# Code that should be executed before each plot.
#plot_pre_code
# Base directory, to which ``plot::`` file names are relative
# to. (If None or empty, file names are relative to the
# directory where the file containing the directive is.)
#plot_basedir
# Whether to show links to the files in HTML.
plot_html_show_formats = True
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
def setup(app):
app.add_stylesheet( "css/aiw.css" )
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'classic' and 'alabaster'.
html_theme = 'sphinx_rtd_theme'
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {
# 'collapse_navigation': False,
# 'display_version': False,
# 'navigation_depth': 2,
# }
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "deap_orange_icon_32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Chocolate-doc'
| bsd-3-clause |
marionleborgne/nupic.research | htmresearch/frameworks/capybara/analysis.py | 1 | 7958 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import time
import datetime
from htmresearch.frameworks.capybara.sdr import load_sdrs
from htmresearch.frameworks.capybara.supervised import train_and_test
from htmresearch.frameworks.capybara.proj import (
project_vectors, project_matrix)
from htmresearch.frameworks.capybara.utils import (
get_logger, check_shape, indent, hours_minutes_seconds)
from htmresearch.frameworks.capybara.viz import (
plot_matrix, plot_projections, make_plot_title, make_subplots)
from htmresearch.frameworks.capybara.embedding import (
convert_to_embeddings, reshape_embeddings)
from htmresearch.frameworks.capybara.distance import (
distance_matrix, sequence_distance, reshaped_sequence_distance)
PHASES = ['train', 'test']
CELL_TYPES = ['sp', 'tm']
SP_OUT_WIDTH = 2048
TM_OUT_WIDTH = 65536
LOGGER = get_logger()
def analyze_sdr_sequences(sdr_sequences_train, sdr_sequences_test, data_id,
nb_chunks, n_neighbors, tsne, aggregation, plot_dir,
assume_sequence_alignment):
sdr_widths = {'sp': SP_OUT_WIDTH, 'tm': TM_OUT_WIDTH}
accuracies = {cell_type: {} for cell_type in CELL_TYPES}
dist_mats = {cell_type: {} for cell_type in CELL_TYPES}
embeddings = {cell_type: {} for cell_type in CELL_TYPES}
X = {cell_type: {} for cell_type in CELL_TYPES}
y = {}
# Step 1: convert the SDR sequences to "sequence embeddings" and compute the
# pair-wise sequence distances.
for phase, sdr_sequences in zip(PHASES,
[sdr_sequences_train, sdr_sequences_test]):
# Sort by label to make it easier to visualize embeddings later.
sorted_sdr_sequences = sdr_sequences.sort_values('label')
y[phase] = sorted_sdr_sequences.label.values
# Convert SDRs to embeddings.
(embeddings['sp'][phase],
embeddings['tm'][phase]) = convert_to_embeddings(sorted_sdr_sequences,
aggregation,
nb_chunks)
# Make sure the shapes are ok.
nb_sequences = len(sorted_sdr_sequences)
for cell_type in CELL_TYPES:
check_shape(embeddings[cell_type][phase], (nb_sequences, nb_chunks,
sdr_widths[cell_type]))
check_shape(embeddings[cell_type][phase], (nb_sequences, nb_chunks,
sdr_widths[cell_type]))
check_shape(y[phase], (nb_sequences,))
# Compute distance matrix.
distance = lambda a, b: sequence_distance(a, b, assume_sequence_alignment)
dist_mats['sp'][phase], dist_mats['tm'][phase], _ = distance_matrix(
embeddings['sp'][phase], embeddings['tm'][phase], distance)
# Step 2: Flatten the sequence embeddings to be able to classify each
# sequence with a supervised classifier. The classifier uses the same
# sequence distance as the distance matrix.
for cell_type in CELL_TYPES:
# Flatten embeddings.
# Note: we have to flatten X because sklearn doesn't allow for X to be > 2D.
# Here, the initial shape of X (i.e. sequence embeddings) is 3D and
# therefore has to be flattened to 2D. See the logic of reshape_embeddings()
# for details on how the embeddings are converted from 2D to 3D.
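    # Illustrative shapes only: an embedding tensor of shape
    # (nb_sequences, nb_chunks, sdr_width) is flattened to the 2D array
    # (nb_sequences, nb_chunks * sdr_width) expected by sklearn, and
    # reshaped_sequence_distance() restores the per-chunk structure before
    # comparing two flattened sequences.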
nb_sequences = len(embeddings[cell_type]['train'])
X[cell_type]['train'] = reshape_embeddings(embeddings[cell_type]['train'],
nb_sequences, nb_chunks,
sdr_widths[cell_type])
X[cell_type]['test'] = reshape_embeddings(embeddings[cell_type]['test'],
nb_sequences, nb_chunks,
sdr_widths[cell_type])
sequence_embedding_shape = (nb_chunks, sdr_widths[cell_type])
reshaped_distance = lambda a, b: reshaped_sequence_distance(
a, b, sequence_embedding_shape, assume_sequence_alignment)
# Compute train and test accuracies
(accuracies[cell_type]['train'],
accuracies[cell_type]['test']) = train_and_test(X[cell_type]['train'],
y['train'],
X[cell_type]['test'],
y['test'],
reshaped_distance,
n_neighbors)
# Step 3: plot the distance matrix and 2D projections for each cell
# type (SP or TM) and phase (train or test).
n_plots = 2 # distance matrix + 2d projection
fig, ax, plot_path = make_subplots(len(PHASES), n_plots, plot_dir, data_id,
cell_type, nb_chunks, aggregation)
for phase in PHASES:
phase_idx = PHASES.index(phase)
title = make_plot_title('Pair-wise distances', phase,
accuracies[cell_type][phase])
plot_matrix(dist_mats[cell_type][phase], title, fig, ax[phase_idx][0])
if tsne:
embeddings_proj = project_vectors(X[cell_type][phase], reshaped_distance)
# Re-use the distance matrix to compute the 2D projections. It's faster.
# embeddings_proj = project_matrix(dist_mats[cell_type][phase])
title = make_plot_title('TSNE 2d projections', phase,
accuracies[cell_type][phase])
plot_projections(embeddings_proj, y[phase], title, fig,
ax[phase_idx][1])
fig.savefig(plot_path)
return accuracies
def run_analysis(trace_dir, data_ids, chunks, n_neighbors, tsne, aggregations,
plot_dir, assume_sequence_alignment):
if not os.path.exists(plot_dir): os.makedirs(plot_dir)
tic = time.time()
LOGGER.info('Analysis tree')
for data_id in data_ids:
LOGGER.info(indent(1) + 'load: ' + data_id)
sdr_sequences = {}
for phase in PHASES:
f_path = os.path.join(trace_dir, 'trace_%s_%s' % (data_id, phase))
sdr_sequences[phase] = load_sdrs(f_path, SP_OUT_WIDTH, TM_OUT_WIDTH)
LOGGER.info(indent(2) + 'loaded: ' + f_path)
LOGGER.info(indent(1) + 'analyze: ' + data_id)
for aggregation in aggregations:
LOGGER.info(indent(2) + 'aggregation: ' + aggregation)
for nb_chunks in chunks:
LOGGER.info(indent(3) + 'nb_chunks: ' + str(nb_chunks))
accuracies = analyze_sdr_sequences(
sdr_sequences['train'].copy(), sdr_sequences['test'].copy(), data_id,
nb_chunks, n_neighbors, tsne, aggregation, plot_dir,
assume_sequence_alignment)
for cell_type, train_test_acc in accuracies.items():
for phase, acc in train_test_acc.items():
LOGGER.info(indent(4) + '%s %s accuracy: %s /100'
% (cell_type.upper(), phase, acc))
toc = time.time()
td = datetime.timedelta(seconds=(toc - tic))
LOGGER.info('Elapsed time: %dh %02dm %02ds' % hours_minutes_seconds(td))
| agpl-3.0 |
ZenDevelopmentSystems/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
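# Editor's aside (not part of the original example): RobustScaler centres each feature on
# its median and scales by the interquartile range, which is why the single large outlier
# above barely affects it. The fitted statistics can be inspected directly:
print("RobustScaler center_ (per-feature median):", robust_scaler.center_)
print("RobustScaler scale_ (per-feature IQR):", robust_scaler.scale_)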
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
addfor/addutils | addutils/pandas_addtut.py | 1 | 1859 | # The MIT License (MIT)
#
# Copyright (c) 2015 addfor s.r.l.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pandas as pd
from IPython.core import display
def side_by_side(*objs, **kwds):
space = kwds.get('space', 4)
reprs = [repr(obj).split('\n') for obj in objs]
print('-' * 40)
print(pd.core.common.adjoin(space, *reprs))
print('-' * 40)
def side_by_side2(*objs, **kwd):
tables = [pd.DataFrame(df)._repr_html_() for df in objs]
res = ""
for t in tables:
#style_pos = t.find('style="')+len('style="')
#new_table = t[:style_pos] + "float:left;padding-right: 0.6em;" + t[style_pos:]
TARGET = "<div"
div_pos = t.find(TARGET) + len(TARGET)
new_table = t[:div_pos] + ' style="float:left;padding-right: 0.6em;"' + t[div_pos:]
res += new_table
return res
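# Editor's usage sketch (hypothetical frames; assumes an IPython/Jupyter session where the
# returned HTML string is rendered, e.g. with IPython.display.HTML):
#   df_a = pd.DataFrame({'x': [1, 2]})
#   df_b = pd.DataFrame({'y': [3, 4]})
#   side_by_side2(df_a, df_b)  # -> HTML with the two tables floated side by side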
| mit |
kdebrab/pandas | doc/make.py | 1 | 13767 | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
Usage
-----
$ python make.py clean
$ python make.py html
$ python make.py latex
"""
import importlib
import sys
import os
import shutil
# import subprocess
import argparse
from contextlib import contextmanager
import webbrowser
import jinja2
DOC_PATH = os.path.dirname(os.path.abspath(__file__))
SOURCE_PATH = os.path.join(DOC_PATH, 'source')
BUILD_PATH = os.path.join(DOC_PATH, 'build')
BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates']
@contextmanager
def _maybe_exclude_notebooks():
"""Skip building the notebooks if pandoc is not installed.
This assumes that nbsphinx is installed.
Skip notebook conversion if:
1. nbconvert isn't installed, or
2. nbconvert is installed, but pandoc isn't
"""
# TODO move to exclude_pattern
base = os.path.dirname(__file__)
notebooks = [os.path.join(base, 'source', nb)
for nb in ['style.ipynb']]
contents = {}
def _remove_notebooks():
for nb in notebooks:
with open(nb, 'rt') as f:
contents[nb] = f.read()
os.remove(nb)
try:
import nbconvert
except ImportError:
sys.stderr.write('Warning: nbconvert not installed. '
'Skipping notebooks.\n')
_remove_notebooks()
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
sys.stderr.write('Warning: Pandoc is not installed. '
'Skipping notebooks.\n')
_remove_notebooks()
yield
for nb, content in contents.items():
with open(nb, 'wt') as f:
f.write(content)
class DocBuilder:
"""Class to wrap the different commands of this script.
All public methods of this class can be called as parameters of the
script.
"""
def __init__(self, num_jobs=1, include_api=True, single_doc=None,
verbosity=0):
self.num_jobs = num_jobs
self.include_api = include_api
self.verbosity = verbosity
self.single_doc = None
self.single_doc_type = None
if single_doc is not None:
self._process_single_doc(single_doc)
self.exclude_patterns = self._exclude_patterns
self._generate_index()
if self.single_doc_type == 'docstring':
self._run_os('sphinx-autogen', '-o',
'source/generated_single', 'source/index.rst')
@property
def _exclude_patterns(self):
"""Docs source files that will be excluded from building."""
# TODO move maybe_exclude_notebooks here
if self.single_doc is not None:
rst_files = [f for f in os.listdir(SOURCE_PATH)
if ((f.endswith('.rst') or f.endswith('.ipynb'))
and (f != 'index.rst')
and (f != '{0}.rst'.format(self.single_doc)))]
if self.single_doc_type != 'api':
rst_files += ['generated/*.rst']
elif not self.include_api:
rst_files = ['api.rst', 'generated/*.rst']
else:
rst_files = ['generated_single/*.rst']
exclude_patterns = ','.join(
'{!r}'.format(i) for i in ['**.ipynb_checkpoints'] + rst_files)
return exclude_patterns
def _process_single_doc(self, single_doc):
"""Extract self.single_doc (base name) and self.single_doc_type from
passed single_doc kwarg.
"""
self.include_api = False
if single_doc == 'api.rst' or single_doc == 'api':
self.single_doc_type = 'api'
self.single_doc = 'api'
elif os.path.exists(os.path.join(SOURCE_PATH, single_doc)):
self.single_doc_type = 'rst'
self.single_doc = os.path.splitext(os.path.basename(single_doc))[0]
elif os.path.exists(
os.path.join(SOURCE_PATH, '{}.rst'.format(single_doc))):
self.single_doc_type = 'rst'
self.single_doc = single_doc
elif single_doc is not None:
try:
obj = pandas # noqa: F821
for name in single_doc.split('.'):
obj = getattr(obj, name)
except AttributeError:
raise ValueError('Single document not understood, it should '
'be a file in doc/source/*.rst (e.g. '
'"contributing.rst" or a pandas function or '
'method (e.g. "pandas.DataFrame.head")')
else:
self.single_doc_type = 'docstring'
if single_doc.startswith('pandas.'):
self.single_doc = single_doc[len('pandas.'):]
else:
self.single_doc = single_doc
def _copy_generated_docstring(self):
"""Copy existing generated (from api.rst) docstring page because
this is more correct in certain cases (where a custom autodoc
template is used).
"""
fname = os.path.join(SOURCE_PATH, 'generated',
'pandas.{}.rst'.format(self.single_doc))
temp_dir = os.path.join(SOURCE_PATH, 'generated_single')
try:
os.makedirs(temp_dir)
except OSError:
pass
if os.path.exists(fname):
try:
# copying to make sure sphinx always thinks it is new
# and needs to be re-generated (to pick source code changes)
shutil.copy(fname, temp_dir)
except: # noqa
pass
def _generate_index(self):
"""Create index.rst file with the specified sections."""
if self.single_doc_type == 'docstring':
self._copy_generated_docstring()
with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f:
t = jinja2.Template(f.read())
with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f:
f.write(t.render(include_api=self.include_api,
single_doc=self.single_doc,
single_doc_type=self.single_doc_type))
@staticmethod
def _create_build_structure():
"""Create directories required to build documentation."""
for dirname in BUILD_DIRS:
try:
os.makedirs(os.path.join(BUILD_PATH, dirname))
except OSError:
pass
@staticmethod
def _run_os(*args):
"""Execute a command as a OS terminal.
Parameters
----------
*args : list of str
Command and parameters to be executed
Examples
--------
>>> DocBuilder()._run_os('python', '--version')
"""
# TODO check_call should be more safe, but it fails with
# exclude patterns, needs investigation
# subprocess.check_call(args, stderr=subprocess.STDOUT)
os.system(' '.join(args))
def _sphinx_build(self, kind):
"""Call sphinx to build documentation.
Attribute `num_jobs` from the class is used.
Parameters
----------
kind : {'html', 'latex'}
Examples
--------
>>> DocBuilder(num_jobs=4)._sphinx_build('html')
"""
if kind not in ('html', 'latex', 'spelling'):
raise ValueError('kind must be html, latex or '
'spelling, not {}'.format(kind))
self._run_os('sphinx-build',
'-j{}'.format(self.num_jobs),
'-b{}'.format(kind),
'-{}'.format(
'v' * self.verbosity) if self.verbosity else '',
'-d{}'.format(os.path.join(BUILD_PATH, 'doctrees')),
'-Dexclude_patterns={}'.format(self.exclude_patterns),
SOURCE_PATH,
os.path.join(BUILD_PATH, kind))
def _open_browser(self):
base_url = os.path.join('file://', DOC_PATH, 'build', 'html')
if self.single_doc_type == 'docstring':
url = os.path.join(
base_url,
'generated_single', 'pandas.{}.html'.format(self.single_doc))
else:
url = os.path.join(base_url, '{}.html'.format(self.single_doc))
webbrowser.open(url, new=2)
def html(self):
"""Build HTML documentation."""
self._create_build_structure()
with _maybe_exclude_notebooks():
self._sphinx_build('html')
zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
if os.path.exists(zip_fname):
os.remove(zip_fname)
if self.single_doc is not None:
self._open_browser()
shutil.rmtree(os.path.join(SOURCE_PATH, 'generated_single'),
ignore_errors=True)
def latex(self, force=False):
"""Build PDF documentation."""
self._create_build_structure()
if sys.platform == 'win32':
sys.stderr.write('latex build has not been tested on windows\n')
else:
self._sphinx_build('latex')
os.chdir(os.path.join(BUILD_PATH, 'latex'))
if force:
for i in range(3):
self._run_os('pdflatex',
'-interaction=nonstopmode',
'pandas.tex')
raise SystemExit('You should check the file '
'"build/latex/pandas.pdf" for problems.')
else:
self._run_os('make')
def latex_forced(self):
"""Build PDF documentation with retries to find missing references."""
self.latex(force=True)
@staticmethod
def clean():
"""Clean documentation generated files."""
shutil.rmtree(BUILD_PATH, ignore_errors=True)
shutil.rmtree(os.path.join(SOURCE_PATH, 'generated'),
ignore_errors=True)
def zip_html(self):
"""Compress HTML documentation into a zip file."""
zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
if os.path.exists(zip_fname):
os.remove(zip_fname)
dirname = os.path.join(BUILD_PATH, 'html')
fnames = os.listdir(dirname)
os.chdir(dirname)
self._run_os('zip',
zip_fname,
'-r',
'-q',
*fnames)
def spellcheck(self):
"""Spell check the documentation."""
self._sphinx_build('spelling')
output_location = os.path.join('build', 'spelling', 'output.txt')
with open(output_location) as output:
lines = output.readlines()
if lines:
raise SyntaxError(
'Found misspelled words.'
' Check pandas/doc/build/spelling/output.txt'
' for more details.')
def main():
cmds = [method for method in dir(DocBuilder) if not method.startswith('_')]
argparser = argparse.ArgumentParser(
description='pandas documentation builder',
epilog='Commands: {}'.format(','.join(cmds)))
argparser.add_argument('command',
nargs='?',
default='html',
help='command to run: {}'.format(', '.join(cmds)))
argparser.add_argument('--num-jobs',
type=int,
default=1,
help='number of jobs used by sphinx-build')
argparser.add_argument('--no-api',
default=False,
                           help='omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=None,
help=('filename of section or method name to '
'compile, e.g. "indexing", "DataFrame.join"'))
argparser.add_argument('--python-path',
type=str,
default=os.path.dirname(DOC_PATH),
help='path')
argparser.add_argument('-v', action='count', dest='verbosity', default=0,
help=('increase verbosity (can be repeated), '
'passed to the sphinx build command'))
args = argparser.parse_args()
if args.command not in cmds:
raise ValueError('Unknown command {}. Available options: {}'.format(
args.command, ', '.join(cmds)))
# Below we update both os.environ and sys.path. The former is used by
# external libraries (namely Sphinx) to compile this module and resolve
# the import of `python_path` correctly. The latter is used to resolve
# the import within the module, injecting it into the global namespace
os.environ['PYTHONPATH'] = args.python_path
sys.path.append(args.python_path)
globals()['pandas'] = importlib.import_module('pandas')
# Set the matplotlib backend to the non-interactive Agg backend for all
# child processes.
os.environ['MPLBACKEND'] = 'module://matplotlib.backends.backend_agg'
builder = DocBuilder(args.num_jobs, not args.no_api, args.single,
args.verbosity)
getattr(builder, args.command)()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
shahankhatch/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
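# Editor's addition (not in the original example): summarise the staged curves above by
# their final-stage test errors.
print("Final test error, discrete SAMME: %.4f" % ada_discrete_err[-1])
print("Final test error, real SAMME.R:   %.4f" % ada_real_err[-1])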
| bsd-3-clause |
brightchen/h2o-3 | py2/testdir_single_jvm/test_GLM_hastie_shuffle.py | 20 | 5925 | import unittest, time, sys, random, copy
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm, h2o_util
from h2o_test import verboseprint, dump_json, OutputObj
# Dataset created from this:
# Elements of Statistical Learning 2nd Ed.; Hastie, Tibshirani, Friedman; Feb 2011
# example 10.2 page 357
# Ten features, standard independent Gaussian. Target y is:
# y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
# 9.34 is the median of a chi-squared random variable with 10 degrees of freedom
# (sum of squares of 10 standard Gaussians)
# http://www.stanford.edu/~hastie/local.ftp/Springer/ESLII_print5.pdf
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
# we can force a col type to enum now? with param columnTypes
# "Numeric"
# make the last column enum
# Instead of string for parse, make this a dictionary, with column index, value
# that's used for updating the ColumnTypes array before making it a string for parse
columnTypeDict = {10: 'Enum'}
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, columnTypeDict=columnTypeDict,
hex_key=csvFilename + ".hex", schema='put', timeoutSecs=30)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
for i in range(10):
print "Summary on column", i
# FIX! how come only 0 works here for column
co = h2o_cmd.runSummary(key=parse_key, column=i)
for k,v in co:
print k, v
expected = []
allowedDelta = 0
labelListUsed = list(labelList)
labelListUsed.remove('C11')
numColsUsed = numCols - 1
parameters = {
'validation_frame': parse_key,
'ignored_columns': None,
# FIX! for now just use a column that's binomial
'response_column': 'C11',
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'alpha': '[1e-4]',
'lambda': '[0.5,0.25, 0.1]',
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
# 'use_all_factor_levels': False,
}
start = time.time()
model_key = 'hastie_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
model_id=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=60)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult, 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
# compare this glm to the first one. since the files are replications, the results
# should be similar?
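    # Editor's note: `validation` is not defined in this port, so the comparison below is
    # effectively disabled (self.validation1 stays falsy and the else branch always runs).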
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'AUC', validation, self.validation1)
else:
# self.validation1 = copy.deepcopy(validation)
self.validation1 = None
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM_hastie_shuffle(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
# This test also adds file shuffling, to see that row order doesn't matter
csvFilename = "1mx10_hastie_10_2.data.gz"
bucket = 'home-0xdiag-datasets'
csvPathname = 'standard' + '/' + csvFilename
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename1xShuf = "hastie_1x.data_shuf"
pathname1xShuf = SYNDATASETS_DIR + '/' + filename1xShuf
h2o_util.file_shuffle(pathname1x, pathname1xShuf)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1xShuf, pathname1xShuf, pathname2x)
filename2xShuf = "hastie_2x.data_shuf"
pathname2xShuf = SYNDATASETS_DIR + '/' + filename2xShuf
h2o_util.file_shuffle(pathname2x, pathname2xShuf)
glm_doit(self, filename2xShuf, None, pathname2xShuf, timeoutSecs=45)
# too big to shuffle?
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2xShuf,pathname2xShuf,pathname4x)
glm_doit(self,filename4x, None, pathname4x, timeoutSecs=120)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
phobson/mpl-probscale | probscale/tests/test_validate.py | 1 | 2721 | from matplotlib import pyplot
import pytest
from probscale import validate
from probscale import algo
def test_axes_object_invalid():
with pytest.raises(ValueError):
validate.axes_object("junk")
def test_axes_object_with_ax():
fig, ax = pyplot.subplots()
fig1, ax1 = validate.axes_object(ax)
assert isinstance(ax1, pyplot.Axes)
assert isinstance(fig1, pyplot.Figure)
assert ax1 is ax
assert fig1 is fig
def test_axes_object_with_None():
fig1, ax1 = validate.axes_object(None)
assert isinstance(ax1, pyplot.Axes)
assert isinstance(fig1, pyplot.Figure)
@pytest.mark.parametrize(
("which", "kwarg"),
[
("x", "fitprobs"),
("y", "fitprobs"),
("y", "fitlogs"),
("both", "fitprobs"),
("both", "fitlogs"),
(None, "fitprobs"),
(None, "fitlogs"),
],
)
def test_fit_arguments_valid(which, kwarg):
result = validate.fit_argument(which, kwarg)
assert result == which
@pytest.mark.parametrize("kwarg", ["fitprobs", "fitlogs"])
def test_fit_arguments_invalid(kwarg):
with pytest.raises(ValueError):
validate.fit_argument("junk", kwarg)
@pytest.mark.parametrize(
("value", "error"), [("x", None), ("y", None), ("junk", ValueError)]
)
def test_axis_name(value, error):
if error is not None:
with pytest.raises(error):
validate.axis_name(value, "axname")
else:
assert value == validate.axis_name(value, "axname")
@pytest.mark.parametrize(
("value", "expected", "error"),
[
("PP", "pp", None),
("Qq", "qq", None),
("ProB", "prob", None),
("junk", None, ValueError),
],
)
def test_axis_type(value, expected, error):
if error is not None:
with pytest.raises(error):
validate.axis_type(value)
else:
assert expected == validate.axis_type(value)
@pytest.mark.parametrize(
("value", "expected"), [(None, dict()), (dict(a=1, b="test"), dict(a=1, b="test"))]
)
def test_other_options(value, expected):
assert validate.other_options(value) == expected
@pytest.mark.parametrize(("value", "expected"), [(None, ""), ("test", "test")])
def test_axis_label(value, expected):
result = validate.axis_label(value)
assert result == expected
@pytest.mark.parametrize(
("value", "expected", "error"),
[
("fit", algo._bs_fit, None),
("resids", None, NotImplementedError),
("junk", None, ValueError),
],
)
def test_estimator(value, expected, error):
if error is not None:
with pytest.raises(error):
validate.estimator(value)
else:
est = validate.estimator(value)
assert est is expected
| bsd-3-clause |
bede/sparNA | sparna.py | 1 | 29351 | #!/usr/bin/env python3
# Author: Bede Constantinides - b|at|bede|dot|im
# TODO
# | Decide on BWA vs Bowtie2
# | GZIP support
# | Report % read alignment in mapping to ref and contig
# | Interleaved reads (ONE TRUE FORMAT)
# | add minimum similarity threshold for reference selection
# | report on trimming, %remapped
# | Bootstrap/dogfoood assemblies with --trusted-contigs etc ?
# DEPENDENCIES
# | python packages:
# | argh, biopython, khmer, plotly
# | others, expected inside $PATH:
# | bwa, bowtie2, samtools, vcftools, bcftools, bedtools, seqtk, spades, quast
# | others, bundled inside res/ directory:
# | trimmomatic
import os
import io
import sys
import argh
import time
import json
import pandas
import logging
import requests
import subprocess
import multiprocessing
import concurrent.futures
import pprint
from collections import OrderedDict
from Bio import SeqIO
from Bio import SeqUtils
import plotly.offline as py
import plotly.graph_objs as go
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def run(cmd):
return subprocess.run(cmd,
shell=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
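# Editor's usage sketch: run() returns a CompletedProcess with stderr folded into stdout, e.g.
#   result = run('echo hello')
#   result.returncode, result.stdout  # -> 0, 'hello\n'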
def name_sample(fwd_fq):
fwd_fq_prefix = os.path.splitext(os.path.split(fwd_fq)[1])[0]
return fwd_fq_prefix
def import_reads(fwd_fq, rev_fq, params):
print('Importing reads...')
cmd = (
'cat {fwd_fq} > {out}/raw/{name}.f.fastq '
'&& cat {rev_fq} > {out}/raw/{name}.r.fastq '
'&& interleave-reads.py {out}/raw/{name}.f.fastq {out}/raw/{name}.r.fastq '
'> {out}/raw/{name}.fr.fastq'
.format(**params, fwd_fq=fwd_fq, rev_fq=rev_fq))
logger.info(cmd)
cmd_run = run(cmd)
logger.info(cmd_run.stdout, cmd_run.stderr)
print('\tDone') if cmd_run.returncode == 0 else sys.exit('ERR_IMPORT')
def trim(norm_k_list, params):
print('Trimming...')
# Fetch smallest norm_k for trimming with Trimmomatic min_len - Screed bug workaround
params['min_len'] = max(map(int, norm_k_list.split(',')))
cmd = (
'java -jar {pipe}/res/trimmomatic-0.33.jar PE'
' {out}/raw/{name}.f.fastq'
' {out}/raw/{name}.r.fastq'
' {out}/trim/{name}.f_pe.fastq'
' {out}/trim/{name}.f_se.fastq'
' {out}/trim/{name}.r_pe.fastq'
' {out}/trim/{name}.r_se.fastq'
' ILLUMINACLIP:{pipe}/res/illumina_adapters.fa:2:30:10'.format(**params))
if params['qual_trim']:
print('\tQuality trimming...')
logger.info('Quality trimming...')
cmd += ' SLIDINGWINDOW:4:20'.format(**params)
cmd += (
' MINLEN:{min_len}'
' && cat {out}/trim/{name}.f_se.fastq {out}/trim/{name}.r_se.fastq'
' > {out}/trim/{name}.se.fastq'
' && interleave-reads.py {out}/trim/{name}.f_pe.fastq {out}/trim/{name}.r_pe.fastq'
' > {out}/trim/{name}.fr.fastq'.format(**params))
logger.info(cmd)
cmd_run = run(cmd)
logger.info(cmd_run.stderr)
print('\tDone') if cmd_run.returncode == 0 else sys.exit('ERR_TRIM')
def normalise(norm_perms, params):
print('Normalising...')
cmds = []
for norm_perm in norm_perms:
cmd_vars = dict(**params,
k=str(norm_perm['k']),
c=str(norm_perm['c']))
cmd = (
'normalize-by-median.py -C {c} -k {k} -N 4 -x 1e8 -p'
' {out}/trim/{name}.fr.fastq'
' -o {out}/norm/{name}.norm_k{k}c{c}.fr.fastq'
' && normalize-by-median.py -C {c} -k {k} -N 4 -x 1e9'
' {out}/trim/{name}.se.fastq'
' -o {out}/norm/{name}.norm_k{k}c{c}.se.fastq'
' && split-paired-reads.py'
' -1 {out}/norm/{name}.norm_k{k}c{c}.f_pe.fastq'
' -2 {out}/norm/{name}.norm_k{k}c{c}.r_pe.fastq'
' {out}/norm/{name}.norm_k{k}c{c}.fr.fastq'
' && cat {out}/norm/{name}.norm_k{k}c{c}.fr.fastq'
' {out}/norm/{name}.norm_k{k}c{c}.se.fastq >'
' {out}/norm/{name}.norm_k{k}c{c}.pe_and_se.fastq'.format(**cmd_vars))
cmds.append(cmd)
print('\tNormalising norm_c={c}, norm_k={k}'.format(**cmd_vars))
logger.info('Normalising norm_c={c}, norm_k={k}'.format(**cmd_vars))
with multiprocessing.Pool(params['threads']) as pool:
results = pool.map(run, cmds)
logger.info([result.stdout + result.stdout for result in results])
print('\tAll done') if not max([r.returncode for r in results]) else sys.exit('ERR_NORM')
return norm_perms
def assemble(asm_perms, params):
'''
Performs multiple assemblies and returns and OrderedDict of assembly names and paths
'''
print('Assembling...')
if params['asm_k']:
asm_k_fmt = 'k' + 'k'.join(params['asm_k'])
else:
asm_k_fmt = 'k'
cmds_asm = []
cmd_vars = dict(**params,
asm_k_fmt=asm_k_fmt)
if params['no_norm']:
cmd_asm = (
'spades.py -m 8 -t 12'
' --12 {out}/trim/{name}.fr.fastq'
' -s {out}/trim/{name}.se.fastq'
' -o {out}/asm/{name}.no_norm.asm_{asm_k_fmt} --careful'.format(**cmd_vars))
print('\tAssembling without prior normalisation'.format(**cmd_vars))
cmds_asm.append(cmd_asm)
for asm_perm in asm_perms:
cmd_vars['k'] = str(asm_perm['k'])
cmd_vars['c'] = str(asm_perm['c'])
cmd_asm = (
'spades.py -m 8 -t {threads}'
' --pe1-1 {out}/norm/{name}.norm_k{k}c{c}.f_pe.fastq'
' --pe1-2 {out}/norm/{name}.norm_k{k}c{c}.r_pe.fastq'.format(**cmd_vars))
if params['asm_k']:
cmd_asm += ' -k {asm_k}'.format(**cmd_vars)
cmd_asm += (
' --s1 {out}/norm/{name}.norm_k{k}c{c}.se.fastq'
' -o {out}/asm/{name}.norm_k{k}c{c}.asm_{asm_k_fmt} --careful'.format(**cmd_vars))
cmds_asm.append(cmd_asm)
print('\tAssembling norm_c={c}, norm_k={k}, asm_k={asm_k}'.format(**cmd_vars))
with multiprocessing.Pool(params['threads']) as pool:
results = pool.map(run, cmds_asm)
logger.info([result.stdout for result in results])
print('\tAll done') if not max([r.returncode for r in results]) else sys.exit('ERR_ASM')
asms = os.listdir(params['out'] + '/asm')
asm_paths = [params['out'] + '/asm/' + asm + '/contigs.fasta' for asm in asms]
return OrderedDict(zip(asms, asm_paths))
def prune_assemblies(asms_paths, min_len, params):
asms_paths_pruned = OrderedDict()
for asm, path in asms_paths.items():
asms_paths_pruned[asm] = path.replace('/asm/', '/asm_prune/')
records = (r for r in SeqIO.parse(path, 'fasta') if len(r.seq) >= min_len)
os.makedirs(asms_paths_pruned[asm].replace('/contigs.fasta', ''))
SeqIO.write(records, asms_paths_pruned[asm], 'fasta')
return asms_paths_pruned
def gc_content(asms_paths):
asms_gc = {}
for asm, path in asms_paths.items():
asm_gc = []
for record in SeqIO.parse(path, 'fasta'):
asm_gc.append(SeqUtils.GC(record.seq)/100)
asms_gc[asm] = asm_gc
return asms_gc
def map_to_assemblies(asms_paths, params):
'''
Map original reads to each assembly with Bowtie2
Record mapping statistics
Screen uniquely mapped reads and quantify reads mapped per contig
    Returns a dict mapping each assembly name to a list of uniquely mapped read counts per contig
'''
print('Aligning to assemblies... (Bowtie2)')
asms_coverages = {}
for asm, asm_path in asms_paths.items():
i = 0
print(asm, i)
cmd_vars = {**params,
'asm':asm,
'asm_path':asm_path}
cmds = [
'bowtie2-build -q {asm_path} {out}/remap/{asm}',
'bowtie2 -x {out}/remap/{asm} --no-unal --very-sensitive-local --threads {threads}'
' -1 {out}/raw/{name}.f.fastq'
' -2 {out}/raw/{name}.r.fastq'
' -S {out}/remap/{asm}.sam'
' 2> {out}/remap/{asm}.bt2.stats',
'grep -v XS:i: {out}/remap/{asm}.sam > {out}/remap/{asm}.uniq.sam',
'samtools view -bS {out}/remap/{asm}.uniq.sam'
' | samtools sort - -o {out}/remap/{asm}.uniq.bam',
'samtools index {out}/remap/{asm}.uniq.bam',
'samtools idxstats {out}/remap/{asm}.uniq.bam'
' > {out}/remap/{asm}.uniq.bam.stats',
'rm {out}/remap/{asm}.sam {out}/remap/{asm}.uniq.sam']
cmds = [cmd.format(**cmd_vars) for cmd in cmds]
for cmd in cmds:
logger.info(cmd)
cmd_run = run(cmd)
logger.info(cmd_run.stdout)
cmd_prefix = cmd.split(' ')[0]
print('\tDone (' +cmd_prefix+ ')') if cmd_run.returncode == 0 else sys.exit('ERR_REMAP')
with open('{out}/remap/{asm}.bt2.stats'.format(**cmd_vars), 'r') as bt2_stats:
map_prop = float(bt2_stats.read().partition('% overall')[0].split('\n')[-1].strip())/100
asm_coverages = []
with open('{out}/remap/{asm}.uniq.bam.stats'.format(**cmd_vars), 'r') as bam_stats:
for line in bam_stats:
if not line.startswith('*'):
reads_mapped = int(line.strip().split('\t')[2])
asm_coverages.append(int(reads_mapped))
asms_coverages[asm] = asm_coverages
return asms_coverages
def onecodex_lca(seq, onecodex_api_key):
'''
Returns dict of OneCodex real-time API k-mer hits for a given sequence
e.g. {'elapsed_secs':'0.0003','k': 31,'n_hits': 97,'n_lookups': 128,'tax_id': 9606}
'''
url = 'https://app.onecodex.com/api/v0/search'
payload = {'sequence':str(seq)}
auth = requests.auth.HTTPBasicAuth(onecodex_api_key, '')
response = requests.post(url, payload, auth=auth, timeout=5)
result = json.loads(response.text)
result['prop_hits'] = round(int(result['n_hits'])/int(result['n_lookups']), 3)
return result
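# Editor's usage sketch (hypothetical key; not executed here because it makes a network call):
#   hits = onecodex_lca('ACGT' * 40, onecodex_api_key)
#   hits['tax_id'], hits['prop_hits']  # e.g. 9606, 0.758 (values depend on the sequence)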
def ebi_taxid_to_lineage(tax_id):
'''
Returns scientific name and lineage for a given taxid using EBI's taxonomy API
e.g.('Retroviridae', ['Viruses', 'Retro-transcribing viruses'])
'''
url = 'http://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/tax-id/{}'
if tax_id == 0 or tax_id == 1:
return None, None
response = requests.get(url.format(tax_id), timeout=5)
result = json.loads(response.text)
sciname = result['scientificName']
taxonomy = [x for x in result['lineage'].split('; ') if x]
return sciname, taxonomy
def onecodex_lca_taxa(seqrecord, onecodex_api_key):
'''
Returns a scientific name and lineage for a SeqRecord using OneCodex and EBI APIs
e.g. ('NODE_3_length_4481_cov_46.6129_ID_7947',
('Hepatitis C virus genotype 3',
['Viruses',
'ssRNA viruses',
'ssRNA positive-strand viruses, no DNA stage',
'Flaviviridae',
'Hepacivirus']))
'''
hits = onecodex_lca(str(seqrecord.seq), onecodex_api_key)
sciname, taxonomy = ebi_taxid_to_lineage(hits['tax_id'])
result = (sciname, taxonomy, hits)
return result
def fasta_onecodex_lca_taxa(fasta_path, onecodex_api_key):
'''
Executes onecodex_lca_taxa() in parallel for a multifasta file
'''
seqrecords = SeqIO.parse(fasta_path, 'fasta')
taxa = {}
with concurrent.futures.ThreadPoolExecutor(50) as executor:
futures = {executor.submit(onecodex_lca_taxa, seqrecord, onecodex_api_key): seqrecord for seqrecord in seqrecords}
for future in concurrent.futures.as_completed(futures):
seqrecord = futures[future]
try:
data = future.result()
except Exception as exception:
taxa[seqrecord.id] = (None, None, None)
logger.info('Skipping {}'.format(seqrecord.id))
else:
taxa[seqrecord.id] = future.result()
return taxa
def onecodex_assemblies(asms_paths, onecodex_api_key):
'''
Returns OneCodex hits for a dict of assembly names and corresponding paths
'''
print('Fetching LCA taxonomic assignments... (requires network access)')
sample_results = OrderedDict()
for asm_name, asm_path in asms_paths.items():
print('\tAssembly {}'.format(asm_name))
sample_results[asm_name] = fasta_onecodex_lca_taxa(asm_path, onecodex_api_key)
return sample_results
def seqrecords(fasta_path):
'''
    Accepts path to multifasta, returns an iterator of Biopython SeqRecords
'''
return SeqIO.parse(fasta_path, 'fasta')
def lengths(asms_paths):
'''
    Accepts dict of assembly names and paths, returns dict of per-assembly contig lengths
'''
lengths = {}
for asm_name, asm_path in asms_paths.items():
lengths[asm_name] = []
records = seqrecords(asm_path)
for record in records:
lengths[asm_name].append(len(record.seq))
return lengths
def gc_contents(asms_paths):
'''
    Accepts dict of assembly names and paths, returns dict of per-assembly GC contents
'''
gc_contents = {}
for asm_name, asm_path in asms_paths.items():
records = seqrecords(asm_path)
gc_contents[asm_name] = []
        for record in records:
            gc_contents[asm_name].append(SeqUtils.GC(record.seq)/100)
return gc_contents
def marker_metadata(asms_paths, lengths, gc_contents, taxa):
'''
Returns summary metadata for each sequence
'''
metadata = {}
for asm_name, asm_path in asms_paths.items():
metadata[asm_name] = []
records = seqrecords(asm_path)
for i, record in enumerate(records):
lineage = ';'.join(taxa[asm_name][record.id][1]) if taxa[asm_name][record.id][1] else ''
lineage_fmt = (lineage[:40] + '..') if len(lineage) > 50 else lineage
text = (
'{}<br>'
'lca: {} (taxid: {})<br>'
'lineage: {}<br>'
'length: {}<br>'
'gc_content: {}<br>'
''.format(record.id,
taxa[asm_name][record.id][0],
0,
#taxa[asm_name][record.id][2]['tax_id'],
lineage_fmt,
lengths[asm_name][i],
round(float(gc_contents[asm_name][i]), 3)))
metadata[asm_name].append(text)
return metadata
def build_ebi_blast_query(title, sequence, database):
'''
Returns dict of REST params for the EBI BLAST API
'''
logger.info('building query')
return { 'email': '[email protected]',
'program': 'blastn',
'stype': 'dna',
'database': database,
'align': 6,
'match_scores': '1,-3',
'gapopen': 5,
'gapextend': 2,
'exp': '1e-10',
'filter': 'T',
'dropoff': 0,
'scores': 5,
'alignments': 5,
'title': title,
'sequence': str(sequence) }
def parse_hits(title, raw_hits):
'''
Returns list of tuples of BLAST hits
[(blast, tab, output, fields), (blast, tab, output, fields)]
'''
hits = []
for line in io.StringIO(raw_hits):
if ':' in line:
fields = [field.strip() for field in line.split('\t')]
hit = (title, ) + tuple(fields[1].split(':')) + tuple(fields[2:])
hits.append(hit)
return hits
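# Editor's sketch with a made-up tab-separated hit line (real EBI output fields may differ):
#   parse_hits('contig_1', 'query\tEM_STD:AB012345\t98.5\t1200\t3\n')
#   -> [('contig_1', 'EM_STD', 'AB012345', '98.5', '1200', '3')]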
def fetch_annotation(database, accession):
'''
Return SeqRecord of annotation for given EMBL accession number
'''
query = 'http://www.ebi.ac.uk/Tools/dbfetch/dbfetch/{}/{}'.format(database, accession)
request = requests.get(query)
annotation = SeqIO.read(io.StringIO(request.text), 'embl')
return annotation
def ebi_blast(query):
'''
Returns BLAST hits as a tuple containing a list of tuples for each hit
('seq', [(blast, tab, output, fields),
(blast, tab, output, fields)])
'''
run_url = 'http://www.ebi.ac.uk/Tools/services/rest/ncbiblast/run/'
status_url = 'http://www.ebi.ac.uk/Tools/services/rest/ncbiblast/status/'
results_url = 'http://www.ebi.ac.uk/Tools/services/rest/ncbiblast/result/'
call = requests.post(run_url, data=query)
start_time = time.time()
logger.info('dispatched blast jobid: ' + call.text)
while True:
status = requests.get(status_url + call.text)
if status.text == 'FINISHED':
hits_r = requests.get(results_url + call.text + '/out')
hits = parse_hits(query['title'], hits_r.text)
logger.info(status.text + ' ' + call.text)
logger.info('Job completed in ' + str(time.time() - start_time))
break
elif time.time() - start_time > 180:
print('blast timeout')
logger.error('blast timeout')
break
elif status.text == 'RUNNING':
time.sleep(2)
else:
print('status: ' + status.text)
logger.error('status: ' + status.text)
break
return (query['title'], hits)
def ebi_annotated_blast(query):
'''
Returns BLAST hits as a tuple containing a list of tuples of hit tuples and SeqRecord annotations
('seq', [((blast, tab, output, fields), SeqRecord),
((blast, tab, output, fields), SeqRecord)])
'''
run_url = 'http://www.ebi.ac.uk/Tools/services/rest/ncbiblast/run/'
status_url = 'http://www.ebi.ac.uk/Tools/services/rest/ncbiblast/status/'
results_url = 'http://www.ebi.ac.uk/Tools/services/rest/ncbiblast/result/'
call = requests.post(run_url, data=query)
start_time = time.time()
logger.info('dispatched blast jobid: ' + call.text)
while True:
status = requests.get(status_url + call.text)
if status.text == 'FINISHED':
hits_r = requests.get(results_url + call.text + '/out')
hits = parse_hits(query['title'], hits_r.text)
annotations_items = [hit[1] + hit[2] for hit in hits] # all there
annotations = list(fetch_annotation(hit[1], hit[2]) for hit in hits)
hits_annotations = list(zip(hits, annotations))
# hits_annotations = list(zip(hits)) TESTING WITHOUT SEQRECORD
print('\t\tQuery ' + query['title'])
logger.info(status.text + ' ' + call.text)
# print(time.time() - start_time)
break
elif time.time() - start_time > 180:
logger.error('blast timeout')
hits_annotations = None
break
elif status.text == 'RUNNING':
time.sleep(5)
else:
logger.error('status: ' + status.text)
hits_annotations = None
break
return (query['title'], hits_annotations)
def fasta_blaster(fasta, database, max_seqs, min_len):
'''
    NEEDS UPDATING FOR NESTED ORDEREDDICTS
    Records shorter than min_len are skipped; only the leading queries (capped via max_seqs)
    are searched and the remainder are returned with None results.
Returns BLAST results as an OrderedDict of ebi_blast() or ebi_annotated_blast() output
ebi_blast():
OrderedDict([('seq_1', [(blast, tab, output, fields),
(blast, tab, output, fields)],
('seq_2', [(blast, tab, output, fields),
(blast, tab, output, fields)])])
ebi_annotated_blast():
OrderedDict([('seq_1', [((blast, tab, output, fields), SeqRecord),
((blast, tab, output, fields), SeqRecord)],
('seq_2', [((blast, tab, output, fields), SeqRecord),
((blast, tab, output, fields), SeqRecord)])])
'''
records = OrderedDict()
with open(fasta, 'r') as fasta_file:
for record in SeqIO.parse(fasta_file, 'fasta'):
if len(record.seq) >= min_len:
records[record.id] = record.seq
queries = [build_ebi_blast_query(title, seq, database) for title, seq in records.items()]
with multiprocessing.Pool(30) as pool:
results = pool.map(ebi_annotated_blast, queries[0:max_seqs+1])
if len(queries) > max_seqs:
results += zip([q['title'] for q in queries[max_seqs+1:]], [None]*len(queries[max_seqs+1:]))
return OrderedDict(results)
def blast_assemblies(asms_paths, database, max_seqs, min_len):
'''
Returns BLAST hit information for a dict of assembly names and corresponding paths
'''
print('BLASTing assemblies...')
sample_results = OrderedDict()
for asm_name, asm_path in asms_paths.items():
print('\tAssembly {}'.format(asm_name))
sample_results[asm_name] = fasta_blaster(asm_path, database, max_seqs, min_len)
return sample_results
def blast_superkingdoms(blast_results):
asms_superkingdoms = {}
for asm, contigs in blast_results.items():
asm_superkingdoms = []
for contig, hits in contigs.items():
if hits: # Hits found
top_hit_superkingdom = hits[0][1].annotations['taxonomy'][0]
asm_superkingdoms.append(top_hit_superkingdom)
elif type(hits) is list: # Zero hits
asm_superkingdoms.append(False)
elif hits is None: # Not searched
asm_superkingdoms.append(None)
assert hits or type(hits) is list or hits is None
asms_superkingdoms[asm] = asm_superkingdoms
return asms_superkingdoms
def blast_summary(blast_results, asms_covs):
asms_summaries = {}
for asm, contigs in blast_results.items():
asm_summaries = []
for i, (contig, hits) in enumerate(contigs.items()):
if hits: # Hits found
description = hits[0][1].description[:40] + (hits[0][1].description[40:] and '…')
top_hit_summary = (''
'{2}<br>Coverage: {0} reads<br>{1}<br>Accession: {3}:{4}'
'<br>Identity: {5}%<br>Alignment length: {6}<br>Mismatches: {7}<br>'
'E-value: {13}'.format(asms_covs[asm][i], description, *hits[0][0]))
asm_summaries.append(top_hit_summary)
elif type(hits) is list: # Zero hits
asm_summaries.append(False)
elif hits is None: # Not searched
asm_summaries.append(None)
assert hits or type(hits) is list or hits is None
asms_summaries[asm] = asm_summaries
return asms_summaries
def plotly(asms_names, asms_stats, lca, blast, params):
cov_max = max(sum([i for i in asms_stats['covs'].values()], []))
cov_scale_factor = round(cov_max/5000, 1) # For bubble scaling
traces = []
for asm_name in asms_names:
if lca:
traces.append(
go.Scatter(
x=asms_stats['lens'][asm_name],
y=asms_stats['gc'][asm_name],
mode='markers',
name=asm_name,
text=asms_stats['legend'][asm_name],
line=dict(shape='spline'),
marker=dict(
opacity=0.5,
symbol='circle',
sizemode='area',
sizeref=cov_scale_factor,
size=asms_stats['covs'][asm_name],
line=dict(width=1))))
elif blast:
traces.append(
go.Scatter(
x=asms_stats['lens'][asm_name],
y=asms_stats['gc'][asm_name],
mode='markers',
name=asm_name,
text=asms_stats['blast_summary'][asm_name],
line=dict(shape='spline'),
marker=dict(
opacity=0.5,
symbol='circle',
sizemode='area',
sizeref=cov_scale_factor,
size=asms_stats['covs'][asm_name],
line=dict(width=1))))
else:
traces.append(
go.Scatter(
x=asms_stats['lens'][asm_name],
y=asms_stats['gc'][asm_name],
mode='markers',
name=asm_name,
text=asms_stats['names'][asm_name],
line=dict(shape='spline'),
marker=dict(
opacity=0.5,
symbol='circle',
sizemode='area',
sizeref=cov_scale_factor,
size=asms_stats['covs'][asm_name],
line=dict(width=1))))
layout = go.Layout(
title='Contig length vs. GC content vs. coverage',
xaxis=dict(
title='Contig length',
gridcolor='rgb(255, 255, 255)',
zerolinewidth=1,
type='log',
gridwidth=2),
yaxis=dict(
title='GC content',
gridcolor='rgb(255, 255, 255)',
zerolinewidth=1,
gridwidth=2),
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)')
fig = go.Figure(data=traces, layout=layout)
return py.plot(fig, filename=params['out'] + '/plot.html')
def report(chart_url, start_time, end_time, params):
elapsed_time = end_time - start_time
report_content = 'wall_time\t{}\nchart_url\t{}'.format(elapsed_time, chart_url)
with open(params['out'] + '/summary.txt', 'w') as report:
report.write(report_content)
print(report_content)
def main(
debug=False,
fwd_fq=None, rev_fq=None,
qual_trim=False,
lca=False, blast=False,
norm_c_list=None, norm_k_list=None,
asm_k_list=None, no_norm=False,
onecodex_api_key='a1d32ce32583468192101cc1d0cf27ec',
blast_db='em_rel', blast_max_seqs=5, min_len=500,
out_prefix='sparna', threads=4):
start_time = int(time.time())
params = dict(name=name_sample(fwd_fq),
out=out_prefix + '_' + name_sample(fwd_fq),
pipe=os.path.dirname(os.path.realpath(__file__)),
qual_trim=qual_trim,
no_norm=no_norm,
norm_c=norm_c_list.split(',') if norm_c_list else None,
norm_k=norm_k_list.split(',') if norm_k_list else None,
asm_k=asm_k_list if asm_k_list else 0,
threads=threads)
if norm_k_list and norm_c_list:
norm_perms = [{'k':k, 'c':c} for k in params['norm_k'] for c in params['norm_c']]
asm_perms = [{'k':p['k'],'c':p['c']} for p in norm_perms]
else:
norm_perms = [{'k':'0', 'c':'0'}]
asm_perms = [{'k':'0', 'c':'0'}]
for dir in ['raw', 'trim', 'norm', 'asm', 'asm_prune', 'remap', 'eval']:
if not os.path.exists(params['out'] + '/' + dir):
os.makedirs(params['out'] + '/' + dir)
import_reads(fwd_fq, rev_fq, params)
trim(norm_k_list, params)
if norm_k_list and norm_c_list:
norm_perms = normalise(norm_perms, params)
else:
norm_perms = None
asms_paths_full = assemble(asm_perms, params)
asms_paths = prune_assemblies(asms_paths_full, min_len, params)
asms_names = {a: [r.id for r in SeqIO.parse(p, 'fasta')] for a, p in asms_paths.items()}
asms_lens = lengths(asms_paths)
asms_covs = map_to_assemblies(asms_paths, params)
asms_gc = gc_content(asms_paths)
if lca:
lca_taxa = onecodex_assemblies(asms_paths, onecodex_api_key)
metadata_summaries = marker_metadata(asms_paths, asms_lens, asms_gc, lca_taxa)
asms_stats = dict(names=asms_names,
lens=asms_lens,
covs=asms_covs,
legend=metadata_summaries,
gc=asms_gc,
cpg=None)
# print('\npaths', asms_paths)
# print('\nasm_names', asms_names)
# print(len(asms_names['060-660_r1_Cap1_F.norm_k21c1.asm_k']))
# print(len(asms_names['060-660_r1_Cap1_F.norm_k21c10.asm_k']))
# print('\nlengths', asms_lens)
# print(len(asms_lens['060-660_r1_Cap1_F.norm_k21c1.asm_k']))
# print(len(asms_lens['060-660_r1_Cap1_F.norm_k21c10.asm_k']))
# print('\ncovs', asms_covs)
# print(len(asms_covs['060-660_r1_Cap1_F.norm_k21c1.asm_k']))
# print(len(asms_covs['060-660_r1_Cap1_F.norm_k21c10.asm_k']))
# print('\ngc', asms_gc)
# print(len(asms_gc['060-660_r1_Cap1_F.norm_k21c1.asm_k']))
# print(len(asms_gc['060-660_r1_Cap1_F.norm_k21c10.asm_k']))
# pprint.pprint(lca_taxa)
# pprint.pprint(metadata_summaries)
elif blast:
blast_results = blast_assemblies(asms_paths, blast_db, blast_max_seqs, min_len)
asms_stats = dict(names=asms_names,
lens=asms_lens,
covs=asms_covs,
blast_summary=blast_summary(blast_results, asms_covs),
blast_superkingdoms=blast_superkingdoms(blast_results),
gc=gc_content(asms_paths),
cpg=None)
else:
asms_stats = dict(names=asms_names,
lens=asms_lens,
covs=asms_covs,
gc=asms_gc,
cpg=None)
chart_url = plotly(asms_names, asms_stats, lca, blast, params)
report(chart_url, start_time, time.time(), params)
argh.dispatch_command(main) | gpl-3.0 |
davidgbe/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
amolkahat/pandas | pandas/tests/io/test_excel.py | 2 | 98525 | # pylint: disable=E1101
import os
import warnings
from datetime import datetime, date, time, timedelta
from distutils.version import LooseVersion
from functools import partial
from warnings import catch_warnings
from collections import OrderedDict
import numpy as np
import pytest
from numpy import nan
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.compat import u, range, map, BytesIO, iteritems, PY36
from pandas.core.config import set_option, get_option
from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter,
register_writer, _XlsxWriter
)
from pandas.io.formats.excel import ExcelFormatter
from pandas.io.parsers import read_csv
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
@td.skip_if_no('xlrd', '0.9')
class SharedItems(object):
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "data")
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
def get_csv_refdf(self, basename):
"""
Obtain the reference data from read_csv with the Python engine.
Parameters
----------
basename : str
File base name, excluding file extension.
Returns
-------
dfref : DataFrame
"""
pref = os.path.join(self.dirpath, basename + '.csv')
dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python')
return dfref
def get_excelfile(self, basename, ext):
"""
Return test data ExcelFile instance.
Parameters
----------
        basename : str
            File base name, excluding file extension.
        ext : str
            File extension, including the leading dot (e.g. '.xlsx').
Returns
-------
excel : io.excel.ExcelFile
"""
return ExcelFile(os.path.join(self.dirpath, basename + ext))
def get_exceldf(self, basename, ext, *args, **kwds):
"""
Return test data DataFrame.
Parameters
----------
        basename : str
            File base name, excluding file extension.
        ext : str
            File extension, including the leading dot (e.g. '.xlsx').
        *args, **kwds
            Forwarded to ``read_excel``.
Returns
-------
df : DataFrame
"""
pth = os.path.join(self.dirpath, basename + ext)
return read_excel(pth, *args, **kwds)
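    # Typical usage from a test method (see the reader tests below):
    #   df = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)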
class ReadingTestsBase(SharedItems):
# This is based on ExcelWriterBase
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_int(self, ext):
dfref = self.get_csv_refdf('test1')
dfref = dfref.reindex(columns=['A', 'B', 'C'])
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols=3)
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols=3)
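        # parse_cols is the deprecated alias of usecols, so using it should
        # emit a FutureWarning.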
with tm.assert_produces_warning(FutureWarning):
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols=3)
        # TODO: add index to xls file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
tm.assert_frame_equal(df3, dfref, check_names=False)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_list(self, ext):
dfref = self.get_csv_refdf('test1')
dfref = dfref.reindex(columns=['B', 'C'])
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols=[0, 2, 3])
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols=[0, 2, 3])
with tm.assert_produces_warning(FutureWarning):
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols=[0, 2, 3])
        # TODO: add index to xls file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
tm.assert_frame_equal(df3, dfref, check_names=False)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_usecols_str(self, ext):
dfref = self.get_csv_refdf('test1')
df1 = dfref.reindex(columns=['A', 'B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A:D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A:D')
with tm.assert_produces_warning(FutureWarning):
df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, parse_cols='A:D')
        # TODO: add index to xls; read_excel on xls ignores the index name?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
tm.assert_frame_equal(df4, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A,C,D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A,C,D')
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = dfref.reindex(columns=['B', 'C'])
df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
usecols='A,C:D')
df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0, usecols='A,C:D')
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
def test_excel_stop_iterator(self, ext):
parsed = self.get_exceldf('test2', ext, 'Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, ext):
parsed = self.get_exceldf('test3', ext, 'Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
def test_excel_passes_na(self, ext):
excel = self.get_excelfile('test4', ext)
parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
        # GH 13967: NaN-like strings such as '1.#QNAN' and 'nan' should be
        # preserved when keep_default_na=False
excel = self.get_excelfile('test5', ext)
parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
na_values=['apple'])
expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
na_values=['apple'])
expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
columns=['Test'])
tm.assert_frame_equal(parsed, expected)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_deprecated_sheetname(self, ext):
# gh-17964
excel = self.get_excelfile('test1', ext)
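        # The deprecated 'sheetname' keyword should warn, while the unknown
        # 'sheet' keyword should raise a TypeError.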
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
read_excel(excel, sheetname='Sheet1')
with pytest.raises(TypeError):
read_excel(excel, sheet='Sheet1')
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_excel_table_sheet_by_index(self, ext):
excel = self.get_excelfile('test1', ext)
dfref = self.get_csv_refdf('test1')
df1 = read_excel(excel, 0, index_col=0)
df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df1 = excel.parse(0, index_col=0)
df2 = excel.parse(1, skiprows=[1], index_col=0)
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
tm.assert_frame_equal(df3, df4)
df3 = excel.parse(0, index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
import xlrd
with pytest.raises(xlrd.XLRDError):
read_excel(excel, 'asdf')
def test_excel_table(self, ext):
dfref = self.get_csv_refdf('test1')
df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, ext):
expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
datetime(1905, 1, 1), datetime(2013, 12, 14),
datetime(2015, 3, 14)])
]))
basename = 'test_types'
# should read in correctly and infer types
actual = self.get_exceldf(basename, ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
        # if not coercing numbers, then ints come in as floats
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
        # convert_float and converters are independent options;
        # both should be accepted together
expected["StrCol"] = expected["StrCol"].apply(str)
actual = self.get_exceldf(
basename, ext, 'Sheet1', converters={"StrCol": str})
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
converters={"StrCol": str})
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, ext):
basename = 'test_converters'
expected = DataFrame.from_dict(OrderedDict([
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
("StrCol", ['1', np.nan, '3', '4', '5']),
]))
converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
'FloatCol': lambda x: 10 * x if x else np.nan,
2: lambda x: 'Found' if x != '' else 'Not found',
3: lambda x: str(x) if x else '',
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = self.get_exceldf(basename, ext, 'Sheet1',
converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, ext):
# GH 8212
basename = 'testdtype'
actual = self.get_exceldf(basename, ext)
expected = DataFrame({
'a': [1, 2, 3, 4],
'b': [2.5, 3.5, 4.5, 5.5],
'c': [1, 2, 3, 4],
'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf(basename, ext,
dtype={'a': 'float64',
'b': 'float32',
'c': str})
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype('float32')
expected['c'] = ['001', '002', '003', '004']
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
self.get_exceldf(basename, ext, dtype={'d': 'int64'})
@pytest.mark.parametrize("dtype,expected", [
(None,
DataFrame({
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0]
})),
({"a": "float64",
"b": "float32",
"c": str,
"d": str
},
DataFrame({
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"]
})),
])
def test_reader_dtype_str(self, ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = self.get_exceldf(basename, ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, ext):
        # Test reading all sheet names by setting sheet_name to None,
# Ensure a dict is returned.
# See PR #9450
basename = 'test_multisheet'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ['Charlie', 'Alpha', 'Beta']
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = 'test_multisheet'
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, 'Charlie', 'Charlie']
dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, ext):
        # Test reading all sheet names by setting sheet_name to None,
        # in the case where some sheets are blank.
# Issue #11711
basename = 'blank_with_header'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, ext):
actual = self.get_exceldf('blank', ext, 'Sheet1')
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, ext):
expected = DataFrame(columns=['col_1', 'col_2'])
actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
    # GH 12292: error when reading one empty column from an Excel file
def test_read_one_empty_col_no_header(self, ext):
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, 'no_header', index=False, header=False)
actual_header_none = read_excel(
path,
'no_header',
usecols=[0],
header=None
)
actual_header_zero = read_excel(
path,
'no_header',
usecols=[0],
header=0
)
expected = DataFrame()
tm.assert_frame_equal(actual_header_none, expected)
tm.assert_frame_equal(actual_header_zero, expected)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_read_one_empty_col_with_header(self, ext):
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, 'with_header', index=False, header=True)
actual_header_none = read_excel(
path,
'with_header',
usecols=[0],
header=None
)
actual_header_zero = read_excel(
path,
'with_header',
usecols=[0],
header=0
)
expected_header_none = DataFrame(pd.Series([0], dtype='int64'))
tm.assert_frame_equal(actual_header_none, expected_header_none)
expected_header_zero = DataFrame(columns=[0])
tm.assert_frame_equal(actual_header_zero, expected_header_zero)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_set_column_names_in_parameter(self, ext):
# GH 12870 : pass down column names associated with
# keyword argument names
refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
[3, 'baz']], columns=['a', 'b'])
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as writer:
refdf.to_excel(writer, 'Data_no_head',
header=False, index=False)
refdf.to_excel(writer, 'Data_with_head', index=False)
refdf.columns = ['A', 'B']
with ExcelFile(pth) as reader:
xlsdf_no_head = read_excel(reader, 'Data_no_head',
header=None, names=['A', 'B'])
xlsdf_with_head = read_excel(reader, 'Data_with_head',
index_col=None, names=['A', 'B'])
tm.assert_frame_equal(xlsdf_no_head, refdf)
tm.assert_frame_equal(xlsdf_with_head, refdf)
def test_date_conversion_overflow(self, ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
[pd.Timestamp('2016-03-16'), 'Jack Black'],
[1e+20, 'Timothy Brown']],
columns=['DateColWithBigInt', 'StringCol'])
result = self.get_exceldf('testdateoverflow', ext)
tm.assert_frame_equal(result, expected)
@td.skip_if_no('xlrd', '1.0.1') # GH-22682
def test_sheet_name_and_sheetname(self, ext):
# GH10559: Minor improvement: Change "sheet_name" to "sheetname"
# GH10969: DOC: Consistent var names (sheetname vs sheet_name)
# GH12604: CLN GH10559 Rename sheetname variable to sheet_name
# GH20920: ExcelFile.parse() and pd.read_xlsx() have different
# behavior for "sheetname" argument
dfref = self.get_csv_refdf('test1')
df1 = self.get_exceldf('test1', ext,
sheet_name='Sheet1') # doc
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df2 = self.get_exceldf('test1', ext,
sheetname='Sheet1') # bkwrd compat
excel = self.get_excelfile('test1', ext)
df1_parse = excel.parse(sheet_name='Sheet1') # doc
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df2_parse = excel.parse(sheetname='Sheet1') # bkwrd compat
tm.assert_frame_equal(df1, dfref, check_names=False)
tm.assert_frame_equal(df2, dfref, check_names=False)
tm.assert_frame_equal(df1_parse, dfref, check_names=False)
tm.assert_frame_equal(df2_parse, dfref, check_names=False)
def test_sheet_name_both_raises(self, ext):
with tm.assert_raises_regex(TypeError, "Cannot specify both"):
self.get_exceldf('test1', ext, sheetname='Sheet1',
sheet_name='Sheet1')
excel = self.get_excelfile('test1', ext)
with tm.assert_raises_regex(TypeError, "Cannot specify both"):
excel.parse(sheetname='Sheet1',
sheet_name='Sheet1')
@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
class TestXlrdReader(ReadingTestsBase):
"""
This is the base class for the xlrd tests, and 3 different file formats
are supported: xls, xlsx, xlsm
"""
def test_excel_read_buffer(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(pth, 'Sheet1', index_col=0)
with open(pth, 'rb') as f:
actual = read_excel(f, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
with open(pth, 'rb') as f:
xls = ExcelFile(f)
actual = read_excel(xls, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('xlwt')
def test_read_xlrd_Book(self, ext):
import xlrd
df = self.frame
with ensure_clean('.xls') as pth:
df.to_excel(pth, "SheetA")
book = xlrd.open_workbook(pth)
with ExcelFile(book, engine="xlrd") as xl:
result = read_excel(xl, "SheetA")
tm.assert_frame_equal(df, result)
result = read_excel(book, sheet_name="SheetA", engine="xlrd")
tm.assert_frame_equal(df, result)
@tm.network
def test_read_from_http_url(self, ext):
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/data/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('s3fs')
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, ext):
boto3 = pytest.importorskip('boto3')
moto = pytest.importorskip('moto')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket="pandas-test")
file_name = os.path.join(self.dirpath, 'test1' + ext)
with open(file_name, 'rb') as f:
conn.Bucket("pandas-test").put_object(Key="test1" + ext,
Body=f)
url = ('s3://pandas-test/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, ext):
# FILE
localtable = os.path.join(self.dirpath, 'test1' + ext)
local_table = read_excel(localtable)
try:
url_table = read_excel('file://localhost/' + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self, ext):
# GH12655
from pathlib import Path
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
path_obj = Path(self.dirpath, 'test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self, ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
abs_dir = os.path.abspath(self.dirpath)
path_obj = LocalPath(abs_dir).join('test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
read_excel(xlsx, 'Sheet1', index_col=0)
assert f.closed
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_creating_and_reading_multiple_sheets(self, ext):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
# See PR #9450
def tdf(sheetname):
d, i = [11, 22, 33], [1, 2, 3]
return DataFrame(d, i, columns=[sheetname])
sheets = ['AAA', 'BBB', 'CCC']
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets, dfs))
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in iteritems(dfs):
df.to_excel(ew, sheetname)
dfs_returned = read_excel(pth, sheet_name=sheets)
for s in sheets:
tm.assert_frame_equal(dfs[s], dfs_returned[s])
def test_reader_seconds(self, ext):
import xlrd
# Test reading times with and without milliseconds. GH5945.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
# Xlrd >= 0.9.3 can handle Excel milliseconds.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54)]})
else:
# Xlrd < 0.9.3 rounds Excel milliseconds.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56),
time(4, 29, 49),
time(6, 13, 42),
time(7, 57, 35),
time(9, 41, 29),
time(11, 25, 22),
time(13, 9, 15),
time(14, 53, 8),
time(16, 37, 1),
time(18, 20, 54)]})
actual = self.get_exceldf('times_1900', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf('times_1904', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, ext):
# GH 4679
mi = MultiIndex.from_product([['foo', 'bar'], ['a', 'b']])
mi_file = os.path.join(self.dirpath, 'testmultiindex' + ext)
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=mi)
actual = read_excel(mi_file, 'mi_column', header=[0, 1])
tm.assert_frame_equal(actual, expected)
actual = read_excel(mi_file, 'mi_column', header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
expected.columns = ['a', 'b', 'c', 'd']
expected.index = mi
actual = read_excel(mi_file, 'mi_index', index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
expected.columns = mi
actual = read_excel(mi_file, 'both', index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
expected.index = mi.set_names(['ilvl1', 'ilvl2'])
expected.columns = ['a', 'b', 'c', 'd']
actual = read_excel(mi_file, 'mi_index_name', index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
expected.index = list(range(4))
expected.columns = mi.set_names(['c1', 'c2'])
actual = read_excel(mi_file, 'mi_column_name',
header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# Issue #11317
expected.columns = mi.set_levels(
[1, 2], level=1).set_names(['c1', 'c2'])
actual = read_excel(mi_file, 'name_with_int',
index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
expected.columns = mi.set_names(['c1', 'c2'])
expected.index = mi.set_names(['ilvl1', 'ilvl2'])
actual = read_excel(mi_file, 'both_name',
index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
actual = read_excel(mi_file, 'both_name_skiprows', index_col=[0, 1],
header=[0, 1], skiprows=2)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no('xlsxwriter')
def test_read_excel_multiindex_empty_level(self, ext):
# GH 12453
with ensure_clean('.xlsx') as path:
df = DataFrame({
('One', 'x'): {0: 1},
('Two', 'X'): {0: 3},
('Two', 'Y'): {0: 7},
('Zero', ''): {0: 0}
})
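            # The empty level label in ('Zero', '') is written out blank and
            # read back as 'Unnamed: 3_level_1'.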
expected = DataFrame({
('One', u'x'): {0: 1},
('Two', u'X'): {0: 3},
('Two', u'Y'): {0: 7},
('Zero', 'Unnamed: 3_level_1'): {0: 0}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1])
tm.assert_frame_equal(actual, expected)
df = pd.DataFrame({
('Beg', ''): {0: 0},
('Middle', 'x'): {0: 1},
('Tail', 'X'): {0: 3},
('Tail', 'Y'): {0: 7}
})
expected = pd.DataFrame({
('Beg', 'Unnamed: 0_level_1'): {0: 0},
('Middle', u'x'): {0: 1},
('Tail', u'X'): {0: 3},
('Tail', u'Y'): {0: 7}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1])
tm.assert_frame_equal(actual, expected)
@td.skip_if_no('xlsxwriter')
def test_excel_multindex_roundtrip(self, ext):
# GH 4679
with ensure_clean('.xlsx') as pth:
for c_idx_names in [True, False]:
for r_idx_names in [True, False]:
for c_idx_levels in [1, 3]:
for r_idx_levels in [1, 3]:
# column index name can't be serialized unless
# MultiIndex
if (c_idx_levels == 1 and c_idx_names):
continue
                            # the empty-name case is currently read back as
                            # unnamed levels, not Nones
check_names = True
if not r_idx_names and r_idx_levels > 1:
check_names = False
df = mkdf(5, 5, c_idx_names,
r_idx_names, c_idx_levels,
r_idx_levels)
df.to_excel(pth)
act = pd.read_excel(
pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(
df, act, check_names=check_names)
df.iloc[0, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(
pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(
df, act, check_names=check_names)
df.iloc[-1, :] = np.nan
df.to_excel(pth)
act = pd.read_excel(
pth, index_col=list(range(r_idx_levels)),
header=list(range(c_idx_levels)))
tm.assert_frame_equal(
df, act, check_names=check_names)
def test_excel_old_index_format(self, ext):
# see gh-4679
filename = 'test_index_name_pre17' + ext
in_file = os.path.join(self.dirpath, filename)
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array([[None, None, None, None, None],
['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'],
['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'],
['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'],
['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'],
['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']])
columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4']
mi = MultiIndex(levels=[['R0', 'R_l0_g0', 'R_l0_g1',
'R_l0_g2', 'R_l0_g3', 'R_l0_g4'],
['R1', 'R_l1_g0', 'R_l1_g1',
'R_l1_g2', 'R_l1_g3', 'R_l1_g4']],
labels=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None])
si = Index(['R0', 'R_l0_g0', 'R_l0_g1', 'R_l0_g2',
'R_l0_g3', 'R_l0_g4'], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(in_file, 'single_names')
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(in_file, 'multi_names')
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array([['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'],
['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'],
['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'],
['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'],
['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']])
columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4']
mi = MultiIndex(levels=[['R_l0_g0', 'R_l0_g1', 'R_l0_g2',
'R_l0_g3', 'R_l0_g4'],
['R_l1_g0', 'R_l1_g1', 'R_l1_g2',
'R_l1_g3', 'R_l1_g4']],
labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None])
si = Index(['R_l0_g0', 'R_l0_g1', 'R_l0_g2',
'R_l0_g3', 'R_l0_g4'], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(in_file, 'single_no_names')
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
header=arg)
def test_read_excel_chunksize(self, ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
chunksize=100)
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_read_excel_parse_dates(self, ext):
# GH 11544, 12051
df = DataFrame(
{'col': [1, 2, 3],
'date_strings': pd.date_range('2012-01-01', periods=3)})
df2 = df.copy()
df2['date_strings'] = df2['date_strings'].dt.strftime('%m/%d/%Y')
with ensure_clean(ext) as pth:
df2.to_excel(pth)
res = read_excel(pth)
tm.assert_frame_equal(df2, res)
# no index_col specified when parse_dates is True
with tm.assert_produces_warning():
res = read_excel(pth, parse_dates=True)
tm.assert_frame_equal(df2, res)
res = read_excel(pth, parse_dates=['date_strings'], index_col=0)
tm.assert_frame_equal(df, res)
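            # A custom date_parser receives the raw '%m/%d/%Y' strings that
            # were written above.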
dateparser = lambda x: pd.datetime.strptime(x, '%m/%d/%Y')
res = read_excel(pth, parse_dates=['date_strings'],
date_parser=dateparser, index_col=0)
tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self, ext):
# GH 4903
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=[0, 2])
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=np.array([0, 2]))
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
# GH 16645
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows='5')
def test_read_excel_squeeze(self, ext):
# GH 12157
f = os.path.join(self.dirpath, 'test_squeeze' + ext)
actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
expected.index.name = 'a'
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, 'two_columns', squeeze=True)
expected = pd.DataFrame({'a': [4, 5, 6],
'b': [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, 'one_column', squeeze=True)
expected = pd.Series([1, 2, 3], name='a')
tm.assert_series_equal(actual, expected)
class _WriterBase(SharedItems):
@pytest.fixture(autouse=True)
def set_engine_and_path(self, request, merge_cells, engine, ext):
"""Fixture to set engine and open file for use in each test case
Rather than requiring `engine=...` to be provided explicitly as an
argument in each test, this fixture sets a global option to dictate
which engine should be used to write Excel files. After executing
the test it rolls back said change to the global option.
It also uses a context manager to open a temporary excel file for
the function to write to, accessible via `self.path`
Notes
-----
This fixture will run as part of each test method defined in the
class and any subclasses, on account of the `autouse=True`
argument
"""
option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
prev_engine = get_option(option_name)
set_option(option_name, engine)
with ensure_clean(ext) as path:
self.path = path
yield
set_option(option_name, prev_engine) # Roll back option change
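    # Within each test, `self.path` points at a temporary file with the
    # parametrized extension, e.g. `self.frame.to_excel(self.path, 'test1')`.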
@pytest.mark.parametrize("merge_cells", [True, False])
@pytest.mark.parametrize("engine,ext", [
pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt')),
pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
])
class TestExcelWriter(_WriterBase):
# Base class for test cases to run with different Excel writers.
def test_excel_sheet_by_name_raise(self, merge_cells, engine, ext):
import xlrd
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(self.path)
xl = ExcelFile(self.path)
df = read_excel(xl, 0)
tm.assert_frame_equal(gt, df)
with pytest.raises(xlrd.XLRDError):
read_excel(xl, '0')
def test_excelwriter_contextmanager(self, merge_cells, engine, ext):
with ExcelWriter(self.path) as writer:
self.frame.to_excel(writer, 'Data1')
self.frame2.to_excel(writer, 'Data2')
with ExcelFile(self.path) as reader:
found_df = read_excel(reader, 'Data1')
found_df2 = read_excel(reader, 'Data2')
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
def test_roundtrip(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(self.path, 'test1')
recons = read_excel(self.path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', index=False)
recons = read_excel(self.path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='NA')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
# GH 3611
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0,
na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
# GH 6573
self.frame.to_excel(self.path, 'Sheet1')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, '0')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
# GH 8825 Pandas Series should provide to_excel method
s = self.frame["A"]
s.to_excel(self.path)
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, merge_cells, engine, ext):
self.mixed_frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_tsframe(self, merge_cells, engine, ext):
df = tm.makeTimeDataFrame()[:5]
df.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
@pytest.mark.parametrize("np_type", [
np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, merge_cells, engine, ext, np_type):
# Test np.int values read come back as int (rather than float
# which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(self.path, 'test1')
tm.assert_frame_equal(int_frame, recons2)
# test with convert_float=False comes back as float
float_frame = frame.astype(float)
recons = read_excel(self.path, 'test1', convert_float=False)
tm.assert_frame_equal(recons, float_frame,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize("np_type", [
np.float16, np.float32, np.float64])
def test_float_types(self, merge_cells, engine, ext, np_type):
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1').astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, merge_cells, engine, ext, np_type):
        # Test np.bool values round-trip (cast back to np_type on read).
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1').astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self, merge_cells, engine, ext):
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(frame, recons)
def test_sheets(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(self.path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = read_excel(reader, 'test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
assert 2 == len(reader.sheet_names)
assert 'test1' == reader.sheet_names[0]
assert 'test2' == reader.sheet_names[1]
def test_colaliases(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(self.path, 'test1', header=col_aliases)
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path, 'test1',
index_label=['test'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label=['test', 'dummy', 'dummy2'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label='test',
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
self.frame.to_excel(self.path,
'test1',
columns=['A', 'B', 'C', 'D'],
index=False, merge_cells=merge_cells)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
df.to_excel(self.path, merge_cells=merge_cells)
xf = ExcelFile(self.path)
result = read_excel(xf, xf.sheet_names[0],
index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == 'foo'
def test_excel_roundtrip_datetime(self, merge_cells, engine, ext):
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(self.tsframe, recons)
# GH4133 - excel output format strings
def test_excel_date_datetime_format(self, merge_cells, engine, ext):
df = DataFrame([[date(2014, 1, 31),
date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=['DATE', 'DATETIME'], columns=['X', 'Y'])
df_expected = DataFrame([[datetime(2014, 1, 31),
datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=['DATE', 'DATETIME'], columns=['X', 'Y'])
with ensure_clean(ext) as filename2:
writer1 = ExcelWriter(self.path)
writer2 = ExcelWriter(filename2,
date_format='DD.MM.YYYY',
datetime_format='DD.MM.YYYY HH-MM-SS')
df.to_excel(writer1, 'test1')
df.to_excel(writer2, 'test1')
writer1.close()
writer2.close()
reader1 = ExcelFile(self.path)
reader2 = ExcelFile(filename2)
rs1 = read_excel(reader1, 'test1', index_col=None)
rs2 = read_excel(reader2, 'test1', index_col=None)
tm.assert_frame_equal(rs1, rs2)
# since the reader returns a datetime object for dates, we need
# to use df_expected to check the result
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, merge_cells, engine, ext):
# GH19242 - test writing Interval without labels
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
frame['new'] = pd.cut(frame[0], 10)
expected['new'] = pd.cut(expected[0], 10).astype(str)
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, merge_cells, engine, ext):
# GH19242 - test writing Interval with labels
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
intervals = pd.cut(frame[0], 10, labels=['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J'])
frame['new'] = intervals
expected['new'] = pd.Series(list(intervals))
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, merge_cells, engine, ext):
# GH 19242, GH9155 - test writing timedelta to xls
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
columns=['A'],
dtype=np.int64
)
expected = frame.copy()
frame['new'] = frame['A'].apply(lambda x: timedelta(seconds=x))
expected['new'] = expected['A'].apply(
lambda x: timedelta(seconds=x).total_seconds() / float(86400))
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, merge_cells, engine, ext):
frame = self.tsframe
xp = frame.resample('M', kind='period').mean()
xp.to_excel(self.path, 'sht1')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'sht1', index_col=0)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
frame.to_excel(self.path, 'test1', header=False)
frame.to_excel(self.path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
frame = pd.DataFrame({'A': [None, 2, 3],
'B': [10, 20, 30],
'C': np.random.sample(3)})
frame = frame.set_index(['A', 'B'])
frame.to_excel(self.path, merge_cells=merge_cells)
df = read_excel(self.path, index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# Test for Issue 11328. If column indices are integers, make
# sure they are handled correctly for either setting of
# merge_cells
def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
(50, 1), (50, 2)])
frame.columns = new_cols_index
header = [0, 1]
if not merge_cells:
header = 0
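        # Without merge_cells the MultiIndex columns are written as a single
        # flattened header row, so the frame is compared against dot-joined
        # column names below.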
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', header=header,
index_col=[0, 1])
if not merge_cells:
fm = frame.columns.format(sparsify=False,
adjoin=False, names=False)
frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
assert recons.index.names == ('time', 'foo')
def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
ext):
        # Test writing and re-reading a MI without the index. GH 5616.
# Initial non-MI frame.
frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
# Write out to Excel without the index.
frame2.to_excel(self.path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(self.path)
frame3 = read_excel(reader, 'test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, merge_cells, engine, ext):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
df.to_excel(self.path, 'test1', float_format='%.2f')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
def test_to_excel_output_encoding(self, merge_cells, engine, ext):
# avoid mixed inferred_type
df = DataFrame([[u'\u0192', u'\u0193', u'\u0194'],
[u'\u0195', u'\u0196', u'\u0197']],
index=[u'A\u0192', u'B'],
columns=[u'X\u0193', u'Y', u'Z'])
with ensure_clean('__tmp_to_excel_float_format__.' + ext) as filename:
df.to_excel(filename, sheet_name='TestSheet', encoding='utf8')
result = read_excel(filename, 'TestSheet', encoding='utf8')
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
with ensure_clean(u('\u0192u.') + ext) as filename:
try:
f = open(filename, 'wb')
except UnicodeEncodeError:
pytest.skip('no unicode file names on this system')
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = read_excel(reader, 'test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
# def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# pytest.skip
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# assert ["test1"] == wbk.sheet_names()
# ws = wbk.sheet_by_name('test1')
# assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# assert 1 == font[cell_xf.font_index].bold
# assert 1 == cell_xf.border.top_line_style
# assert 1 == cell_xf.border.right_line_style
# assert 1 == cell_xf.border.bottom_line_style
# assert 1 == cell_xf.border.left_line_style
# assert 2 == cell_xf.alignment.hor_align
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# pytest.skip
# if openpyxl.__version__ < '1.6.1':
# pytest.skip
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# assert ["test1"] == wbk.get_sheet_names()
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# assert cell.style.font.bold
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.top.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.right.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.bottom.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.left.border_style)
# assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# assert ws.cell(maddr).merged
# os.remove(filename)
def test_excel_010_hemstring(self, merge_cells, engine, ext):
if merge_cells:
pytest.skip('Skip tests for merged MI format.')
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of #2370 until sorted out in 0.11
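        # Helper: write `df` with the given header/index options and read it
        # back using `parser_hdr` as the header for the reader.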
def roundtrip(df, header=True, parser_hdr=0, index=True):
df.to_excel(self.path, header=header,
merge_cells=merge_cells, index=index)
xf = ExcelFile(self.path)
res = read_excel(xf, xf.sheet_names[0], header=parser_hdr)
return res
nrows = 5
ncols = 3
for use_headers in (True, False):
for i in range(1, 4): # row multindex up to nlevel=3
for j in range(1, 4): # col ""
df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
                    # this branch will be removed once multi-column excel
                    # writing is implemented; for now it covers #9794
if j > 1:
with pytest.raises(NotImplementedError):
res = roundtrip(df, use_headers, index=False)
else:
res = roundtrip(df, use_headers)
if use_headers:
assert res.shape == (nrows, ncols + i)
else:
# first row taken as columns
assert res.shape == (nrows - 1, ncols + i)
# no nans
for r in range(len(res.index)):
for c in range(len(res.columns)):
assert res.iloc[r, c] is not np.nan
res = roundtrip(DataFrame([0]))
assert res.shape == (1, 1)
assert res.iloc[0, 0] is not np.nan
res = roundtrip(DataFrame([0]), False, None)
assert res.shape == (1, 2)
assert res.iloc[0, 0] is not np.nan
def test_excel_010_hemstring_raises_NotImplementedError(self, merge_cells,
engine, ext):
        # This test was failing only for j > 1 and header=False,
        # so it is reproduced here as a simple standalone case.
if merge_cells:
pytest.skip('Skip tests for merged MI format.')
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of #2370 until sorted out in 0.11
def roundtrip2(df, header=True, parser_hdr=0, index=True):
df.to_excel(self.path, header=header,
merge_cells=merge_cells, index=index)
xf = ExcelFile(self.path)
res = read_excel(xf, xf.sheet_names[0], header=parser_hdr)
return res
nrows = 5
ncols = 3
j = 2
i = 1
df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
with pytest.raises(NotImplementedError):
roundtrip2(df, header=False, index=False)
def test_duplicated_columns(self, merge_cells, engine, ext):
# Test for issue #5235
write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
colnames = ['A', 'B', 'B']
write_frame.columns = colnames
write_frame.to_excel(self.path, 'test1')
read_frame = read_excel(self.path, 'test1')
read_frame.columns = colnames
tm.assert_frame_equal(write_frame, read_frame)
# 11007 / #10970
write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'A', 'B'])
write_frame.to_excel(self.path, 'test1')
read_frame = read_excel(self.path, 'test1')
read_frame.columns = ['A', 'B', 'A', 'B']
tm.assert_frame_equal(write_frame, read_frame)
# 10982
write_frame.to_excel(self.path, 'test1', index=False, header=False)
read_frame = read_excel(self.path, 'test1', header=None)
write_frame.columns = [0, 1, 2, 3]
tm.assert_frame_equal(write_frame, read_frame)
def test_swapped_columns(self, merge_cells, engine, ext):
# Test for issue #5427.
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
read_frame = read_excel(self.path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_invalid_columns(self, merge_cells, engine, ext):
# 10982
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
write_frame.to_excel(self.path, 'test1', columns=['B', 'C'])
expected = write_frame.reindex(columns=['B', 'C'])
read_frame = read_excel(self.path, 'test1')
tm.assert_frame_equal(expected, read_frame)
with pytest.raises(KeyError):
write_frame.to_excel(self.path, 'test1', columns=['C', 'D'])
def test_comment_arg(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument functionality to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file without comment arg
result1 = read_excel(self.path, 'test_c')
result1.iloc[1, 0] = None
result1.iloc[1, 1] = None
result1.iloc[2, 1] = None
result2 = read_excel(self.path, 'test_c', comment='#')
tm.assert_frame_equal(result1, result2)
def test_comment_default(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument default to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file with default and explicit comment=None
result1 = read_excel(self.path, 'test_c')
result2 = read_excel(self.path, 'test_c', comment=None)
tm.assert_frame_equal(result1, result2)
def test_comment_used(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument is working as expected when used
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Test read_frame_comment against manually produced expected output
expected = DataFrame({'A': ['one', None, 'one'],
'B': ['two', None, None]})
result = read_excel(self.path, 'test_c', comment='#')
tm.assert_frame_equal(result, expected)
def test_comment_emptyline(self, merge_cells, engine, ext):
# Re issue #18735
# Test that read_excel ignores commented lines at the end of file
df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
df.to_excel(self.path, index=False)
# Test that all-comment lines at EoF are ignored
expected = DataFrame({'a': [1], 'b': [2]})
result = read_excel(self.path, comment='#')
tm.assert_frame_equal(result, expected)
def test_datetimes(self, merge_cells, engine, ext):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
datetimes = [datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52)]
write_frame = DataFrame({'A': datetimes})
write_frame.to_excel(self.path, 'Sheet1')
read_frame = read_excel(self.path, 'Sheet1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
# GH7074
def test_bytes_io(self, merge_cells, engine, ext):
bio = BytesIO()
df = DataFrame(np.random.randn(10, 2))
# pass engine explicitly as there is no file path to infer from
writer = ExcelWriter(bio, engine=engine)
df.to_excel(writer)
writer.save()
bio.seek(0)
reread_df = read_excel(bio)
tm.assert_frame_equal(df, reread_df)
# GH8188
def test_write_lists_dict(self, merge_cells, engine, ext):
df = DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}],
'numeric': [1, 2, 3.0],
'str': ['apple', 'banana', 'cherry']})
expected = df.copy()
expected.mixed = expected.mixed.apply(str)
expected.numeric = expected.numeric.astype('int64')
df.to_excel(self.path, 'Sheet1')
read = read_excel(self.path, 'Sheet1', header=0)
tm.assert_frame_equal(read, expected)
# GH13347
def test_true_and_false_value_options(self, merge_cells, engine, ext):
df = pd.DataFrame([['foo', 'bar']], columns=['col1', 'col2'])
expected = df.replace({'foo': True,
'bar': False})
df.to_excel(self.path)
read_frame = read_excel(self.path, true_values=['foo'],
false_values=['bar'])
tm.assert_frame_equal(read_frame, expected)
def test_freeze_panes(self, merge_cells, engine, ext):
# GH15160
expected = DataFrame([[1, 2], [3, 4]], columns=['col1', 'col2'])
expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
result = read_excel(self.path)
tm.assert_frame_equal(expected, result)
def test_path_pathlib(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{}".format(ext))
tm.assert_frame_equal(df, result)
def test_path_localpath(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{}".format(ext))
tm.assert_frame_equal(df, result)
@td.skip_if_no('openpyxl')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'openpyxl')])
class TestOpenpyxlTests(_WriterBase):
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
from openpyxl import styles
hstyle = {
"font": {
"color": '00FF0000',
"bold": True,
},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {
"horizontal": "center",
"vertical": "top",
},
"fill": {
"patternType": 'solid',
'fgColor': {
'rgb': '006666FF',
'tint': 0.3,
},
},
"number_format": {
"format_code": "0.00"
},
"protection": {
"locked": True,
"hidden": False,
},
}
font_color = styles.Color('00FF0000')
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal='center', vertical='top')
fill_color = styles.Color(rgb='006666FF', tint=0.3)
fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
number_format = '0.00'
protection = styles.Protection(locked=True, hidden=False)
kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
assert kw['font'] == font
assert kw['border'] == border
assert kw['alignment'] == alignment
assert kw['fill'] == fill
assert kw['number_format'] == number_format
assert kw['protection'] == protection
def test_write_cells_merge_styled(self, merge_cells, ext, engine):
from pandas.io.formats.excel import ExcelCell
sheet_name = 'merge_styled'
sty_b1 = {'font': {'color': '00FF0000'}}
sty_a2 = {'font': {'color': '0000FF00'}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {'font': {'color': '000000FF', 'bold': True}}
sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = sty_kwargs['font']
merge_cells = [
ExcelCell(col=0, row=0, val='pandas',
mergestart=1, mergeend=1, style=sty_merged),
]
with ensure_clean(ext) as path:
writer = _OpenpyxlWriter(path)
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks['B1']
xcell_a2 = wks['A2']
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
@pytest.mark.parametrize("mode,expected", [
('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
import openpyxl
df = DataFrame([1], columns=['baz'])
with ensure_clean(ext) as f:
wb = openpyxl.Workbook()
wb.worksheets[0].title = 'foo'
wb.worksheets[0]['A1'].value = 'foo'
wb.create_sheet('bar')
wb.worksheets[1]['A1'].value = 'bar'
wb.save(f)
writer = ExcelWriter(f, engine=engine, mode=mode)
df.to_excel(writer, sheet_name='baz', index=False)
writer.save()
wb2 = openpyxl.load_workbook(f)
result = [sheet.title for sheet in wb2.worksheets]
assert result == expected
for index, cell_value in enumerate(expected):
assert wb2.worksheets[index]['A1'].value == cell_value
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xls', 'xlwt')])
class TestXlwtTests(_WriterBase):
def test_excel_raise_error_on_multiindex_columns_and_no_index(
self, merge_cells, ext, engine):
# MultiIndex as columns is not yet implemented (GH 9794)
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(10, 3), columns=cols)
with pytest.raises(NotImplementedError):
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
engine):
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=True)
def test_excel_multiindex_index(self, merge_cells, ext, engine):
# MultiIndex as index works so assert no error #9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(3, 10), index=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
import xlwt
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
assert xls_style.font.bold
assert xlwt.Borders.THIN == xls_style.borders.top
assert xlwt.Borders.THIN == xls_style.borders.right
assert xlwt.Borders.THIN == xls_style.borders.bottom
assert xlwt.Borders.THIN == xls_style.borders.left
assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
def test_write_append_mode_raises(self, merge_cells, ext, engine):
msg = "Append mode is not supported with xlwt!"
with ensure_clean(ext) as f:
with tm.assert_raises_regex(ValueError, msg):
ExcelWriter(f, engine=engine, mode='a')
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'xlsxwriter')])
class TestXlsxWriterTests(_WriterBase):
@td.skip_if_no('openpyxl')
def test_column_format(self, merge_cells, ext, engine):
# Test that column formats are applied to cells. Test for issue #9167.
# Applicable to xlsxwriter only.
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
import openpyxl
with ensure_clean(ext) as path:
frame = DataFrame({'A': [123456, 123456],
'B': [123456, 123456]})
writer = ExcelWriter(path)
frame.to_excel(writer)
# Add a number format to col B and ensure it is applied to cells.
num_format = '#,##0'
write_workbook = writer.book
write_worksheet = write_workbook.worksheets()[0]
col_format = write_workbook.add_format({'num_format': num_format})
write_worksheet.set_column('B:B', None, col_format)
writer.save()
read_workbook = openpyxl.load_workbook(path)
try:
read_worksheet = read_workbook['Sheet1']
except TypeError:
# compat
read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
# Get the number format from the cell.
try:
cell = read_worksheet['B2']
except TypeError:
# compat
cell = read_worksheet.cell('B2')
try:
read_num_format = cell.number_format
except Exception:
read_num_format = cell.style.number_format._format_code
assert read_num_format == num_format
def test_write_append_mode_raises(self, merge_cells, ext, engine):
msg = "Append mode is not supported with xlsxwriter!"
with ensure_clean(ext) as f:
with tm.assert_raises_regex(ValueError, msg):
ExcelWriter(f, engine=engine, mode='a')
class TestExcelWriterEngineTests(object):
@pytest.mark.parametrize('klass,ext', [
pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt'))
])
def test_ExcelWriter_dispatch(self, klass, ext):
with ensure_clean(ext) as path:
writer = ExcelWriter(path)
if ext == '.xlsx' and td.safe_import('xlsxwriter'):
# xlsxwriter has preference over openpyxl if both installed
assert isinstance(writer, _XlsxWriter)
else:
assert isinstance(writer, klass)
def test_ExcelWriter_dispatch_raises(self):
with tm.assert_raises_regex(ValueError, 'No engine'):
ExcelWriter('nothing')
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
called_save = []
called_write_cells = []
class DummyClass(ExcelWriter):
called_save = False
called_write_cells = False
supported_extensions = ['test', 'xlsx', 'xls']
engine = 'dummy'
def save(self):
called_save.append(True)
def write_cells(self, *args, **kwargs):
called_write_cells.append(True)
def check_called(func):
func()
assert len(called_save) >= 1
assert len(called_write_cells) >= 1
del called_save[:]
del called_write_cells[:]
with pd.option_context('io.excel.xlsx.writer', 'dummy'):
register_writer(DummyClass)
writer = ExcelWriter('something.test')
assert isinstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
with catch_warnings(record=True):
panel = tm.makePanel()
func = lambda: df.to_excel('something.test')
check_called(func)
check_called(lambda: panel.to_excel('something.test'))
check_called(lambda: df.to_excel('something.xlsx'))
check_called(
lambda: df.to_excel(
'something.xls', engine='dummy'))
@pytest.mark.parametrize('engine', [
pytest.param('xlwt',
marks=pytest.mark.xfail(reason='xlwt does not support '
'openpyxl-compatible '
'style dicts',
strict=True)),
'xlsxwriter',
'openpyxl',
])
def test_styler_to_excel(engine):
def style(df):
# XXX: RGB colors not supported in xlwt
return DataFrame([['font-weight: bold', '', ''],
['', 'color: blue', ''],
['', '', 'text-decoration: underline'],
['border-style: solid', '', ''],
['', 'font-style: italic', ''],
['', '', 'text-align: right'],
['background-color: red', '', ''],
['number-format: 0%', '', ''],
['', '', ''],
['', '', ''],
['', '', '']],
index=df.index, columns=df.columns)
def assert_equal_style(cell1, cell2):
# XXX: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
assert cell1.border.__dict__ == cell2.border.__dict__
assert cell1.fill.__dict__ == cell2.fill.__dict__
assert cell1.font.__dict__ == cell2.font.__dict__
assert cell1.number_format == cell2.number_format
assert cell1.protection.__dict__ == cell2.protection.__dict__
def custom_converter(css):
# use bold iff there is custom style attached to the cell
if css.strip(' \n;'):
return {'font': {'bold': True}}
return {}
pytest.importorskip('jinja2')
pytest.importorskip(engine)
# Prepare spreadsheets
df = DataFrame(np.random.randn(11, 3))
with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
writer = ExcelWriter(path, engine=engine)
df.to_excel(writer, sheet_name='frame')
df.style.to_excel(writer, sheet_name='unstyled')
styled = df.style.apply(style, axis=None)
styled.to_excel(writer, sheet_name='styled')
ExcelFormatter(styled, style_converter=custom_converter).write(
writer, sheet_name='custom')
writer.save()
if engine not in ('openpyxl', 'xlsxwriter'):
# For other engines, we only smoke test
return
openpyxl = pytest.importorskip('openpyxl')
wb = openpyxl.load_workbook(path)
# (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['unstyled'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
assert cell1.value == cell2.value
assert_equal_style(cell1, cell2)
n_cells += 1
# ensure iteration actually happened:
assert n_cells == (11 + 1) * (3 + 1)
# (2) check styling with default converter
# XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
alpha = '00' if engine == 'openpyxl' else 'FF'
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['styled'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
ref = '%s%d' % (cell2.column, cell2.row)
# XXX: this isn't as strong a test as ideal; we should
# confirm that differences are exclusive
if ref == 'B2':
assert not cell1.font.bold
assert cell2.font.bold
elif ref == 'C3':
assert cell1.font.color.rgb != cell2.font.color.rgb
assert cell2.font.color.rgb == alpha + '0000FF'
elif ref == 'D4':
# This fails with engine=xlsxwriter due to
# https://bitbucket.org/openpyxl/openpyxl/issues/800
if engine == 'xlsxwriter' \
and (LooseVersion(openpyxl.__version__) <
LooseVersion('2.4.6')):
pass
else:
assert cell1.font.underline != cell2.font.underline
assert cell2.font.underline == 'single'
elif ref == 'B5':
assert not cell1.border.left.style
assert (cell2.border.top.style ==
cell2.border.right.style ==
cell2.border.bottom.style ==
cell2.border.left.style ==
'medium')
elif ref == 'C6':
assert not cell1.font.italic
assert cell2.font.italic
elif ref == 'D7':
assert (cell1.alignment.horizontal !=
cell2.alignment.horizontal)
assert cell2.alignment.horizontal == 'right'
elif ref == 'B8':
assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
assert cell1.fill.patternType != cell2.fill.patternType
assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
assert cell2.fill.patternType == 'solid'
elif ref == 'B9':
assert cell1.number_format == 'General'
assert cell2.number_format == '0%'
else:
assert_equal_style(cell1, cell2)
assert cell1.value == cell2.value
n_cells += 1
assert n_cells == (11 + 1) * (3 + 1)
# (3) check styling with custom converter
n_cells = 0
for col1, col2 in zip(wb['frame'].columns,
wb['custom'].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
ref = '%s%d' % (cell2.column, cell2.row)
if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'):
assert not cell1.font.bold
assert cell2.font.bold
else:
assert_equal_style(cell1, cell2)
assert cell1.value == cell2.value
n_cells += 1
assert n_cells == (11 + 1) * (3 + 1)
@td.skip_if_no('openpyxl')
@pytest.mark.skipif(not PY36, reason='requires fspath')
class TestFSPath(object):
def test_excelfile_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
df = DataFrame({"A": [1, 2]})
df.to_excel(path)
xl = ExcelFile(path)
result = os.fspath(xl)
assert result == path
def test_excelwriter_fspath(self):
with tm.ensure_clean('foo.xlsx') as path:
writer = ExcelWriter(path)
assert os.fspath(writer) == str(path)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/pyplots/make.py | 5 | 2007 | #!/usr/bin/env python
from __future__ import print_function
import sys, os, glob
import matplotlib
import IPython.Shell
#matplotlib.rcdefaults()
matplotlib.use('Agg')
mplshell = IPython.Shell.MatplotlibShell('mpl')
formats = [('png', 100),
('hires.png', 200),
('pdf', 72)]
def figs():
print('making figs')
import matplotlib.pyplot as plt
for fname in glob.glob('*.py'):
if fname.split('/')[-1] == __file__.split('/')[-1]: continue
basename, ext = os.path.splitext(fname)
imagefiles = dict([('%s.%s'%(basename, format), dpi)
for format, dpi in formats])
all_exists = True
for imagefile in imagefiles:
if not os.path.exists(imagefile):
all_exists = False
break
if all_exists:
print(' already have %s'%fname)
else:
print(' building %s'%fname)
plt.close('all') # we need to clear between runs
mplshell.magic_run(basename)
for imagefile, dpi in imagefiles.iteritems():
# todo: this will get called even if the run script
# fails and exits, thus creating a stub pdf and png
# files, preventing them from getting built successfully
# later
plt.savefig(imagefile, dpi=dpi)
print('all figures made')
def clean():
patterns = (['#*', '*~', '*pyc'] +
['*.%s' % format for format, dpi in formats])
for pattern in patterns:
for fname in glob.glob(pattern):
os.remove(fname)
print('all clean')
def all():
figs()
funcd = {'figs':figs,
'clean':clean,
'all':all,
}
if len(sys.argv)>1:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s' % (
arg, list(funcd.keys())))
func()
else:
all()
| mit |
sriki18/scipy | scipy/signal/ltisys.py | 25 | 122609 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <[email protected]>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from scipy._lib.six import xrange
from .filter_design import tf2zpk, zpk2tf, normalize, freqs, freqz
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant(object):
def __new__(cls, *system, **kwargs):
"""Create a new object, don't allow direct instances."""
if cls is LinearTimeInvariant:
raise NotImplementedError('The LinearTimeInvariant class is not '
'meant to be used directly, use `lti` '
'or `dlti` instead.')
return super(LinearTimeInvariant, cls).__new__(cls)
def __init__(self):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super(LinearTimeInvariant, self).__init__()
self.inputs = None
self.outputs = None
self._dt = None
@property
def dt(self):
"""Return the sampling time of the system, `None` for `lti` systems."""
return self._dt
@property
def _dt_dict(self):
if self.dt is None:
return {}
else:
return {'dt': self.dt}
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_tf().num` instead.',
DeprecationWarning)
return self.to_tf().num
@num.setter
def num(self, num):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_tf()` '
'before setting `num`.',
DeprecationWarning)
obj = self.to_tf()
obj.num = num
source_class = type(self)
self._copy(source_class(obj))
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_tf().den` instead.',
DeprecationWarning)
return self.to_tf().den
@den.setter
def den(self, den):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_tf()` '
'before setting `den`.',
DeprecationWarning)
obj = self.to_tf()
obj.den = den
source_class = type(self)
self._copy(source_class(obj))
@property
def zeros(self):
"""Zeros of the system."""
return self.to_zpk().zeros
@zeros.setter
def zeros(self, zeros):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_zpk()` '
'before setting `zeros`.',
DeprecationWarning)
obj = self.to_zpk()
obj.zeros = zeros
source_class = type(self)
self._copy(source_class(obj))
@property
def poles(self):
"""Poles of the system."""
return self.to_zpk().poles
@poles.setter
def poles(self, poles):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_zpk()` '
'before setting `poles`.',
DeprecationWarning)
obj = self.to_zpk()
obj.poles = poles
source_class = type(self)
self._copy(source_class(obj))
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_zpk().gain` instead.',
DeprecationWarning)
return self.to_zpk().gain
@gain.setter
def gain(self, gain):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_zpk()` '
'before setting `gain`.',
DeprecationWarning)
obj = self.to_zpk()
obj.gain = gain
source_class = type(self)
self._copy(source_class(obj))
@property
def A(self):
"""State matrix of the `StateSpace` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_ss().A` instead.',
DeprecationWarning)
return self.to_ss().A
@A.setter
def A(self, A):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_ss()` '
'before setting `A`.',
DeprecationWarning)
obj = self.to_ss()
obj.A = A
source_class = type(self)
self._copy(source_class(obj))
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_ss().B` instead.',
DeprecationWarning)
return self.to_ss().B
@B.setter
def B(self, B):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_ss()` '
'before setting `B`.',
DeprecationWarning)
obj = self.to_ss()
obj.B = B
source_class = type(self)
self._copy(source_class(obj))
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_ss().C` instead.',
DeprecationWarning)
return self.to_ss().C
@C.setter
def C(self, C):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_ss()` '
'before setting `C`.',
DeprecationWarning)
obj = self.to_ss()
obj.C = C
source_class = type(self)
self._copy(source_class(obj))
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
warnings.warn('Cross-class properties have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please use `sys.to_ss().D` instead.',
DeprecationWarning)
return self.to_ss().D
@D.setter
def D(self, D):
warnings.warn('Cross-class setters have been deprecated in scipy '
'0.18.0 and will be removed in a future version of '
'scipy. Please convert your system with `sys.to_ss()` '
'before setting `D`.',
DeprecationWarning)
obj = self.to_ss()
obj.D = D
source_class = type(self)
self._copy(source_class(obj))
def _as_ss(self):
"""Convert to `StateSpace` system, without copying.
Returns
-------
sys: StateSpace
The `StateSpace` system. If the class is already an instance of
`StateSpace` then this instance is returned.
"""
if isinstance(self, StateSpace):
return self
else:
return self.to_ss()
def _as_zpk(self):
"""Convert to `ZerosPolesGain` system, without copying.
Returns
-------
sys: ZerosPolesGain
The `ZerosPolesGain` system. If the class is already an instance of
`ZerosPolesGain` then this instance is returned.
"""
if isinstance(self, ZerosPolesGain):
return self
else:
return self.to_zpk()
def _as_tf(self):
"""Convert to `TransferFunction` system, without copying.
Returns
-------
sys: TransferFunction
The `TransferFunction` system. If the class is already an instance of
`TransferFunction` then this instance is returned.
"""
if isinstance(self, TransferFunction):
return self
else:
return self.to_tf()
class lti(LinearTimeInvariant):
"""
Continuous-time linear time invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
continuous-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, dlti
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
5]``).
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> signal.lti(1, 2, 3, 4)
StateSpaceContinuous(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: None
)
>>> signal.lti([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
>>> signal.lti([3, 4], [1, 2])
TransferFunctionContinuous(
array([ 3., 4.]),
array([ 1., 2.]),
dt: None
)
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous, *system)
elif N == 3:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous, *system)
elif N == 4:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system)
else:
raise ValueError("`system` needs to be an instance of `lti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super(lti, self).__init__(*system)
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `impulse` for details.
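Examples
--------
A minimal usage sketch; the first-order lag ``H(s) = 1 / (s + 1)`` below
is chosen purely for illustration:
>>> from scipy import signal
>>> sys = signal.lti([1.0], [1.0, 1.0])
>>> t, y = sys.impulse()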
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `step` for details.
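Examples
--------
A minimal usage sketch with an illustrative first-order system:
>>> from scipy import signal
>>> sys = signal.lti([1.0], [1.0, 1.0])
>>> t, y = sys.step()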
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `lsim` for details.
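Examples
--------
A short sketch driving an illustrative first-order system with a unit
step input (the time grid is arbitrary):
>>> import numpy as np
>>> from scipy import signal
>>> sys = signal.lti([1.0], [1.0, 1.0])
>>> t = np.linspace(0, 5, 101)
>>> tout, y, x = sys.output(np.ones_like(t), t)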
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `bode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `freqresp` for details.
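Examples
--------
A minimal sketch; the system is illustrative and the default frequency
grid is used:
>>> from scipy import signal
>>> sys = signal.lti([1.0], [1.0, 1.0])
>>> w, H = sys.freqresp()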
"""
return freqresp(self, w=w, n=n)
def to_discrete(self, dt, method='zoh', alpha=None):
"""Return a discretized version of the current system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti`
"""
raise NotImplementedError('to_discrete is not implemented for this '
'system class.')
class dlti(LinearTimeInvariant):
"""
Discrete-time linear time invariant system base class.
Parameters
----------
*system: arguments
The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
discrete-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to ``True``
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, lti
Notes
-----
`dlti` instances do not exist directly. Instead, `dlti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> signal.dlti(1, 2, 3, 4)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: True
)
>>> signal.dlti(1, 2, 3, 4, dt=0.1)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: 0.1
)
>>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
>>> signal.dlti([3, 4], [1, 2], dt=0.1)
TransferFunctionDiscrete(
array([ 3., 4.]),
array([ 1., 2.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create an instance of the appropriate subclass."""
if cls is dlti:
N = len(system)
if N == 2:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete, *system, **kwargs)
elif N == 3:
return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
*system, **kwargs)
elif N == 4:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
**kwargs)
else:
raise ValueError("`system` needs to be an instance of `dlti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(dlti, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
dt = kwargs.pop('dt', True)
super(dlti, self).__init__(*system, **kwargs)
self.dt = dt
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def impulse(self, x0=None, t=None, n=None):
"""
Return the impulse response of the discrete-time `dlti` system.
See `dimpulse` for details.
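Examples
--------
A minimal sketch; the discrete-time system and sampling time below are
illustrative only:
>>> from scipy import signal
>>> sys = signal.dlti([1.0], [1.0, -0.5], dt=0.1)
>>> t, y = sys.impulse(n=25)
>>> # ``y`` is a tuple with one response array per system input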
"""
return dimpulse(self, x0=x0, t=t, n=n)
def step(self, x0=None, t=None, n=None):
"""
Return the step response of the discrete-time `dlti` system.
See `dstep` for details.
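Examples
--------
A minimal sketch with an illustrative first-order discrete-time system:
>>> from scipy import signal
>>> sys = signal.dlti([1.0], [1.0, -0.5], dt=0.1)
>>> t, y = sys.step(n=25)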
"""
return dstep(self, x0=x0, t=t, n=n)
def output(self, u, t, x0=None):
"""
Return the response of the discrete-time system to input `u`.
See `dlsim` for details.
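Examples
--------
A short sketch with an illustrative system and a constant input. `dlsim`
additionally returns the state trajectory for state-space systems, so the
result is indexed rather than unpacked:
>>> import numpy as np
>>> from scipy import signal
>>> sys = signal.dlti([1.0], [1.0, -0.5], dt=0.1)
>>> out = sys.output(np.ones(20), t=None)
>>> tout, y = out[0], out[1]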
"""
return dlsim(self, u, t, x0=x0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a discrete-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `dbode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3) with sampling time 0.5s
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
Equivalent: signal.dbode(sys)
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return dbode(self, w=w, n=n)
def freqresp(self, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `dfreqresp` for details.
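Examples
--------
A minimal sketch (illustrative system, default frequency grid):
>>> from scipy import signal
>>> sys = signal.dlti([1.0], [1.0, -0.5], dt=0.1)
>>> w, H = sys.freqresp()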
"""
return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
r"""Linear Time Invariant system class in transfer function form.
Represents the system as the continuous-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
discrete-time transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
`TransferFunction` systems inherit additional
functionality from the `lti`, respectively the `dlti` classes, depending on
which system representation is used.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, lti, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
represented as ``[1, 3, 5]``)
Examples
--------
Construct the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
Construct the transfer function with a sampling time of 0.1 seconds:
.. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}
>>> signal.TransferFunction(num, den, dt=0.1)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_tf()
# Choose whether to inherit from `lti` or from `dlti`
if cls is TransferFunction:
if kwargs.get('dt') is None:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous,
*system,
**kwargs)
else:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete,
*system,
**kwargs)
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super(TransferFunction, self).__init__(**kwargs)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
repr(self.dt),
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `StateSpace` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den),
**self._dt_dict)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
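Examples
--------
A small illustrative conversion; the polynomial coefficients are arbitrary:
>>> from scipy import signal
>>> ss = signal.TransferFunction([1, 3, 3], [1, 2, 1]).to_ss()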
"""
return StateSpace(*tf2ss(self.num, self.den),
**self._dt_dict)
@staticmethod
def _z_to_zinv(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
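Examples
--------
A small illustrative call; the shorter polynomial is zero-padded on the
left so both sequences end up with the same length:
>>> from scipy.signal import TransferFunction
>>> num, den = TransferFunction._z_to_zinv([1, 2], [1, 0, 3])
>>> len(num) == len(den)
True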
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((np.zeros(diff), den))
elif diff < 0:
num = np.hstack((np.zeros(-diff), num))
return num, den
@staticmethod
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
class TransferFunctionContinuous(TransferFunction, lti):
r"""
Continuous-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Continuous-time `TransferFunction` systems inherit additional
functionality from the `lti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
See Also
--------
ZerosPolesGain, StateSpace, lti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``)
Examples
--------
Construct the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `TransferFunction` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `TransferFunction`
"""
return TransferFunction(*cont2discrete((self.num, self.den),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
r"""
Discrete-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Discrete-time `TransferFunction` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
Construct the transfer function with a sampling time of 0.5 seconds:
.. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den, dt=0.5)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.5
)
"""
pass
class ZerosPolesGain(LinearTimeInvariant):
r"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the continuous- or discrete-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
`ZerosPolesGain` systems inherit additional functionality from the `lti`,
respectively the `dlti` classes, depending on which system representation
is used.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, lti, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_zpk()
# Choose whether to inherit from `lti` or from `dlti`
if cls is ZerosPolesGain:
if kwargs.get('dt') is None:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous,
*system,
**kwargs)
else:
return ZerosPolesGainDiscrete.__new__(
ZerosPolesGainDiscrete,
*system,
**kwargs
)
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the zeros, poles, gain system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
super(ZerosPolesGain, self).__init__(**kwargs)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system."""
return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
repr(self.dt),
)
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
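Examples
--------
A small illustrative conversion; the zeros, poles and gain are arbitrary:
>>> from scipy import signal
>>> tf = signal.ZerosPolesGain([1, 2], [3, 4], 5).to_tf()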
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
**self._dt_dict)
def to_zpk(self):
"""
Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
**self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
r"""
Continuous-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the continuous time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Continuous-time `ZerosPolesGain` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
See Also
--------
TransferFunction, StateSpace, lti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `ZerosPolesGain` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `ZerosPolesGain`
"""
return ZerosPolesGain(
*cont2discrete((self.zeros, self.poles, self.gain),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
r"""
Discrete-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the discrete-time transfer function
:math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Discrete-time `ZerosPolesGain` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
r"""
Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u` or the discrete-time difference
equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
inherit additional functionality from the `lti`, respectively the `dlti`
classes, depending on which system representation is used.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, lti, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
>>> sys.to_discrete(0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create new StateSpace object and settle inheritance."""
# Handle object conversion if input is an instance of `lti`
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_ss()
# Choose whether to inherit from `lti` or from `dlti`
if cls is StateSpace:
if kwargs.get('dt') is None:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system, **kwargs)
else:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
*system, **kwargs)
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super(StateSpace, self).__init__(**kwargs)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
repr(self.dt),
)
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
r"""
Continuous-time Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u`.
Continuous-time `StateSpace` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
See Also
--------
TransferFunction, ZerosPolesGain, lti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `StateSpace` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class StateSpaceDiscrete(StateSpace, dlti):
r"""
Discrete-time Linear Time Invariant system in state-space form.
Represents the system as the discrete-time difference equation
:math:`x[k+1] = A x[k] + B u[k]`.
`StateSpace` systems inherit additional functionality from the `dlti`
class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
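Examples
--------
A minimal usage sketch (for illustration): simulate the first-order
low-pass filter ``H(s) = 1 / (s + 1)`` driven by a unit-step input.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> system = signal.lti([1.0], [1.0, 1.0])
>>> t = np.linspace(0, 5, 101)
>>> u = np.ones_like(t)
>>> tout, yout, xout = signal.lsim2(system, U=u, T=t)
>>> plt.plot(tout, yout)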
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Simulate a double integrator y'' = u, with a constant input u = 1
>>> from scipy import signal
>>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
>>> t = np.linspace(0, 5)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim(system, u, t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = zeros((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None or
(isinstance(U, (int, float)) and U == 0.) or
not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in xrange(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
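# The span below is seven times the slowest time constant, where the time
# constant comes from the eigenvalue whose real part is smallest in
# magnitude (e.g. eigenvalues -1 and -5 give tc = 1.0 and t in [0, 7]).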
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
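Examples
--------
A minimal usage sketch (for illustration): impulse response of the
critically damped second-order system ``H(s) = 1 / (s^2 + 2s + 1)``.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse(system)
>>> plt.plot(t, y)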
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
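Examples
--------
A minimal usage sketch (for illustration): step response of the
first-order low-pass filter ``H(s) = 1 / (s + 1)``.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> system = ([1.0], [1.0, 1.0])
>>> t, y = signal.step(system)
>>> plt.plot(t, y)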
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
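Examples
--------
A minimal usage sketch (for illustration): same first-order low-pass
filter as in the `step` example, but integrated with the ODE-based solver.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> system = ([1.0], [1.0, 1.0])
>>> t, y = signal.step2(system)
>>> plt.plot(t, y)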
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
for every value in this array. If not given a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = signal.bode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(s) = 5 / (s-1)^3
>>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
sys = system._as_tf()
elif isinstance(system, dlti):
raise AttributeError('freqresp can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_tf()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
Check that the poles come in complex conjugate pairs.
Check that the shapes of A, B and poles are compatible.
Check that the chosen method is compatible with the provided poles.
Return the update method to use and the ordered poles.
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
# Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
Check that we have complex conjugate pairs and reorder P according to YT,
i.e. real_poles, complex_i, conjugate complex_i, ...
The lexicographic sort on the complex poles is added to help the user
compare sets of poles.
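For example, ``np.array([-2, -1+1j, -1-1j])`` is reordered as
``[-2, -1-1j, -1+1j]`` (the pole with negative imaginary part comes first
in each conjugate pair).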
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
# Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1, then Q1 will be a single
# column orthogonal to Q0, which is what we are looking for!
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles; using the YT technique the two lines
# below seem to work 9 times out of 10, but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
# Problems arise when imag(xj) => 0; no reliable fix is known.
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
# mu1, mu2: first two columns of U => first two rows of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
# vm is V.T; with numpy we want the first two rows of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
# As in _KNV0, if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
# Vect{ker_pole_mu_nu}, assign transfer_matrix_i/transfer_matrix_j to
# ker_pole_mu_nu and iterate. As we are looking for a vector in
# Vect{ker_pole_mu_nu} (see section 6.1 page 19) this might help
# (that's a guess, not a claim!)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
# sort eigenvalues according to their modulus
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
http://drum.lib.umd.edu/handle/1903/5598
The poles P have to be sorted according to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
# Stick to the indices in the paper and then subtract one to get numpy
# array indices; it is a bit easier to link the code to the paper this
# way, even if it is not very clean. The paper is unclear about what
# should be done when there is only one real pole; using KNV0 on this
# real pole seems to work.
if nb_real > 0:
#update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
assert ~np.isreal(poles[i]), "mixing real and complex " + \
"in YT_complex" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
Compute K such that eigenvalues(A - dot(B, K)) = poles.
K is the gain matrix such that the plant described by the linear system
``AX + BU`` will have its closed-loop poles, i.e. the eigenvalues of
``A - B*K``, as close as possible to those asked for in `poles`.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
The closed loop matrix K such that the eigenvalues of ``A-BK``
are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
`rtol` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything i.e when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
`nb_iter` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything i.e when ``B.shape[1] == 1``.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution; however, depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
http://drum.lib.umd.edu/handle/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
# All the input checking is delegated to _valid_inputs; it would only add
# noise to the code here
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
# Step A: QR decomposition of B page 1132 KN
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
# such that (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0]),
# i.e. K=inv(B)*(diag(P)-A)
# if B has as many rows as its rank (but is not square) there are many
# solutions and we can choose one using least squares
# => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
# for complex poles we use the following trick
#
# |a -b| has eigenvalues a+bi and a-bi
# |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
# e.g. solving the first one in R gives the solution
# for the second one in C
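# For example, the conjugate pair -2 +/- 1j is encoded below as the real
# 2x2 block [[-2, -1], [1, -2]], whose eigenvalues are -2 +/- 1j.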
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
# step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
# Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed.
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
# We want to select one vector in ker_pole_j to build the transfer
# matrix. However, qr sometimes returns vectors with zeros on the same
# row for each pole, and this yields very long convergence times.
# At other times it returns a set of vectors, one with zero imaginary
# part and one (or several) with nonzero imaginary parts. After trying
# many ways to select the best possible one (e.g. discarding vectors
# with zero imaginary part for complex poles), summing all vectors in
# ker_pole_j turned out to solve these problems and is a valid choice
# for transfer_matrix.
# This way, for complex poles we are sure to have a nonzero imaginary
# part, and the problem of rows full of zeros in transfer_matrix is
# solved too: when a vector from ker_pole_j has a zero, the other
# one(s) (when ker_pole_j.shape[1] > 1) will not have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
# i.e. transfer_matrix_j/transfer_matrix_j+1 are
# Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
# rel is an array referencing a column of transfer_matrix;
# if we don't copy() it, it will change after the next line and
# the line after will not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles")
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
# K still contains complex with ~=0j imaginary parts, get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
def dlsim(system, u, t=None, x0=None):
"""
Simulate output of a discrete-time linear system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
u : array_like
An input array describing the input at each time `t` (interpolation is
assumed between given times). If there are multiple inputs, then each
column of the rank-2 array represents an input.
t : array_like, optional
The time steps at which the input is defined. If `t` is given, it
must be the same length as `u`, and the final value in `t` determines
the number of steps returned in the output.
x0 : array_like, optional
The initial conditions on the state vector (zero by default).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
System response, as a 1-D array.
xout : ndarray, optional
Time-evolution of the state-vector. Only generated if the input is a
`StateSpace` system.
See Also
--------
lsim, dstep, dimpulse, cont2discrete
Examples
--------
A simple integrator transfer function with a discrete time step of 1.0
could be implemented as:
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t_in = [0.0, 1.0, 2.0, 3.0]
>>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
>>> t_out, y = signal.dlsim(tf, u, t=t_in)
>>> y.T
array([[ 0., 0., 0., 1.]])
"""
# Convert system to dlti-StateSpace
if isinstance(system, lti):
raise AttributeError('dlsim can only be used with discrete-time dlti '
'systems.')
elif not isinstance(system, dlti):
system = dlti(*system[:-1], dt=system[-1])
# Condition needed to ensure output remains compatible
is_ss_input = isinstance(system, StateSpace)
system = system._as_ss()
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
if t is None:
out_samples = len(u)
stoptime = (out_samples - 1) * system.dt
else:
stoptime = t[-1]
out_samples = int(np.floor(stoptime / system.dt)) + 1
# Pre-build output arrays
xout = np.zeros((out_samples, system.A.shape[0]))
yout = np.zeros((out_samples, system.C.shape[0]))
tout = np.linspace(0.0, stoptime, num=out_samples)
# Check initial condition
if x0 is None:
xout[0, :] = np.zeros((system.A.shape[1],))
else:
xout[0, :] = np.asarray(x0)
# Pre-interpolate inputs into the desired time steps
if t is None:
u_dt = u
else:
if len(u.shape) == 1:
u = u[:, np.newaxis]
u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
u_dt = u_dt_interp(tout).transpose()
# Simulate the system
for i in range(0, out_samples - 1):
xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
np.dot(system.B, u_dt[i, :]))
yout[i, :] = (np.dot(system.C, xout[i, :]) +
np.dot(system.D, u_dt[i, :]))
# Last point
yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
np.dot(system.D, u_dt[out_samples-1, :]))
if is_ss_input:
return tout, yout, xout
else:
return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
"""
Impulse response of discrete-time system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
Impulse response of system. Each element of the tuple represents
the output of the system based on an impulse in each input.
See Also
--------
impulse, dstep, dlsim, cont2discrete
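Examples
--------
A minimal usage sketch (for illustration): impulse response of the
discrete-time first-order system ``H(z) = 1 / (z - 0.5)`` with a 0.1 s
sampling time.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> system = ([1.0], [1.0, -0.5], 0.1)
>>> t, y = signal.dimpulse(system, n=30)
>>> plt.step(t, np.squeeze(y))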
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dimpulse can only be used with discrete-time '
'dlti systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, apply a unit impulse at the first time step
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[0, i] = 1.0
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dstep(system, x0=None, t=None, n=None):
"""
Step response of discrete-time system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Output time points, as a 1-D array.
yout : ndarray
Step response of system. Each element of the tuple represents
the output of the system based on a step response to each input.
See Also
--------
step, dimpulse, dlsim, cont2discrete
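Examples
--------
A minimal usage sketch (for illustration): step response of the same
discrete-time first-order system ``H(z) = 1 / (z - 0.5)`` used in the
`dimpulse` example.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> system = ([1.0], [1.0, -0.5], 0.1)
>>> t, y = signal.dstep(system, n=30)
>>> plt.step(t, np.squeeze(y))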
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dstep can only be used with discrete-time dlti '
'systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[:, i] = np.ones((t.shape[0],))
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dfreqresp(system, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Parameters
----------
system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
            * 1 (instance of `dlti`)
            * 3 (num, den, dt)
            * 4 (zeros, poles, gain, dt)
            * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
whole : bool, optional
Normally, if 'w' is not given, frequencies are computed from 0 to the
Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
`whole` is True, compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : 1D ndarray
Frequency array [radians/sample]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3)
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
>>> w, H = signal.dfreqresp(sys)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, dlti):
system = system._as_tf()
elif isinstance(system, lti):
raise AttributeError('dfreqresp can only be used with discrete-time '
'systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_tf()
if system.inputs != 1 or system.outputs != 1:
raise ValueError("dfreqresp requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
# Convert numerator and denominator from polynomials in the variable 'z'
# to polynomials in the variable 'z^-1', as freqz expects.
num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
return freqz(num, den, worN=worN, whole=whole)
def dbode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a discrete-time system.
Parameters
----------
    system : an instance of the `dlti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:
            * 1 (instance of `dlti`)
            * 3 (num, den, dt)
            * 4 (zeros, poles, gain, dt)
            * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/time_unit]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3)
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
Equivalent: sys.bode()
>>> w, mag, phase = signal.dbode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = dfreqresp(system, w=w, n=n)
if isinstance(system, dlti):
dt = system.dt
else:
dt = system[-1]
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
return w / dt, mag, phase
| bsd-3-clause |
lesserwhirls/scipy-cwt | scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
        Width of the rising ramp as a proportion of the total cycle; must be
        in the interval [0, 1]. Default is 1, which produces a rising ramp
        over the entire period.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0, 20*np.pi, 500)
    >>> plt.plot(x, signal.sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
        Duty cycle, i.e. the fraction of the period during which the output
        is +1; must be in the interval [0, 1]. Default is 0.5.
Returns
-------
y : array_like
The output square wave.
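    Examples
    --------
    An illustrative 5 Hz square wave sampled at 500 points over one second
    (values chosen only for the example):
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(0, 1, 500)
    >>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
    >>> plt.ylim(-2, 2)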
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
    # duty must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
    # on the interval 0 to duty*2*pi the function is +1
    mask2 = (1-mask1) & (tmod < w*2*pi)
    place(y,mask2,1)
    # on the interval duty*2*pi to 2*pi the function is -1
    mask3 = (1-mask1) & (1-mask2)
    place(y,mask3,-1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
    Return a Gaussian modulated sinusoid: ``exp(-a t^2) exp(1j*2*pi*fc*t)``.
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
    bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
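    Examples
    --------
    An illustrative pulse (parameter values chosen only for the example) --
    plot the real part together with its envelope:
    >>> import numpy as np
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 2001)
    >>> i, e = signal.gausspulse(t, fc=5, retenv=True)
    >>> plt.plot(t, i, t, e, '--')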
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
    if isinstance(t, str) and t == 'cutoff':  # compute the cutoff point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
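    Examples
    --------
    An illustrative linear chirp whose frequency falls from 6 Hz to 1 Hz over
    10 seconds (values chosen only for the example):
    >>> import numpy as np
    >>> from scipy.signal import chirp
    >>> t = np.linspace(0, 10, 5001)
    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')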
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
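    Examples
    --------
    An illustrative sweep (polynomial coefficients chosen arbitrarily) whose
    instantaneous frequency is
    ``f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2``:
    >>> import numpy as np
    >>> from scipy.signal import sweep_poly
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)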
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
xuyongzhi/scan_volume | src/rotate3D/scripts/active_scanner.py | 1 | 1219 | #!/usr/bin/env python
import rospy
import rosbag
from std_msgs.msg import String
from sensor_msgs.msg import LaserScan
from rotate_to_3D import RotateTo3D
import matplotlib.pyplot as plt
import time
import math
Rotate_To_3D = RotateTo3D()
t0 = None
g_scann = -1
def callback(data):
    # Feed each incoming LaserScan into the 3D reconstruction and, every 50
    # scans, log the scan count, rotation angle and acquisition rate.
    global t0, g_scann
    g_scann += 1
    if g_scann == 0:
        t0 = time.time()
    scanN, theta = Rotate_To_3D.push(data)
    if scanN > 0 and scanN % 50 == 0:
        frequency = g_scann / (time.time()-t0)
        rospy.loginfo(rospy.get_caller_id() + 'scanN = %f, theta = %f, fre=%0.2f',scanN, theta*180.0/math.pi, frequency)
def start_stop( start_stop_str ):
if start_stop_str.data == 'start':
rospy.loginfo('send start command')
Rotate_To_3D.start()
elif start_stop_str.data == 'stop':
rospy.loginfo('send stop command')
Rotate_To_3D.stop()
def listener():
rospy.init_node('rotate_to_3D',anonymous=True)
rospy.Subscriber('scan',LaserScan,callback)
rospy.Subscriber('start_stop_command', String, start_stop)
rospy.loginfo('rotate_to_3D is ready, waiting for start command\nThe 3d result topic is: pcl_3d')
rospy.spin()
if __name__ == '__main__':
listener()
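# Usage sketch (assumes standard ROS 1 command-line tools; the package name is
# a placeholder -- substitute the package this script is installed in):
#   rosrun <your_package> active_scanner.py
#   rostopic pub -1 /start_stop_command std_msgs/String "data: 'start'"
#   rostopic pub -1 /start_stop_command std_msgs/String "data: 'stop'"
# The reconstructed point cloud is published on the 'pcl_3d' topic named in
# the log message above.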
| mit |
jstoxrocky/statsmodels | examples/python/glm_formula.py | 33 | 1547 |
## Generalized Linear Models (Formula)
# This notebook illustrates how you can use R-style formulas to fit Generalized Linear Models.
#
# To begin, we load the ``Star98`` dataset, construct a formula, and pre-process the data:
from __future__ import print_function
import statsmodels.api as sm
import statsmodels.formula.api as smf
star98 = sm.datasets.star98.load_pandas().data
formula = 'SUCCESS ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
dta = star98[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',
'PCTCHRT', 'PCTYRRND', 'PERMINTE', 'AVYRSEXP', 'AVSALK',
'PERSPENK', 'PTRATIO', 'PCTAF']]
endog = dta['NABOVE'] / (dta['NABOVE'] + dta.pop('NBELOW'))
del dta['NABOVE']
dta['SUCCESS'] = endog
# Then, we fit the GLM model:
mod1 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod1.summary()
# Finally, we define a function to apply a custom data transformation within the formula framework:
def double_it(x):
return 2 * x
formula = 'SUCCESS ~ double_it(LOWINC) + PERASIAN + PERBLACK + PERHISP + PCTCHRT + PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
mod2 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod2.summary()
# As expected, the coefficient for ``double_it(LOWINC)`` in the second model is half the size of the ``LOWINC`` coefficient from the first model:
print(mod1.params[1])
print(mod2.params[1] * 2)
| bsd-3-clause |