repo_name | path | copies | size | content | license
---|---|---|---|---|---
macks22/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef_ property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the first n_informative features impact the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
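# Illustrative sketch of make_sparse_data (shapes only; assumes the imports
# at the top of this test module):
# >>> X, y = make_sparse_data(n_samples=20, n_features=5, n_informative=2)
# >>> X.shape, y.shape, sp.issparse(X)
# ((20, 5), (20,), True)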
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
anntzer/scikit-learn | sklearn/utils/estimator_checks.py | 2 | 119596 | import types
import warnings
import pickle
import re
from copy import deepcopy
from functools import partial, wraps
from inspect import signature
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import joblib
from . import IS_PYPY
from .. import config_context
from ._testing import _get_args
from ._testing import assert_raise_message
from ._testing import assert_array_equal
from ._testing import assert_array_almost_equal
from ._testing import assert_allclose
from ._testing import assert_allclose_dense_sparse
from ._testing import set_random_state
from ._testing import SkipTest
from ._testing import ignore_warnings
from ._testing import create_memmap_backed_data
from ._testing import raises
from . import is_scalar_nan
from ..linear_model import LogisticRegression
from ..linear_model import Ridge
from ..base import (
clone,
ClusterMixin,
is_classifier,
is_regressor,
is_outlier_detector,
RegressorMixin,
_is_pairwise,
)
from ..metrics import accuracy_score, adjusted_rand_score, f1_score
from ..random_projection import BaseRandomProjection
from ..feature_selection import SelectKBest
from ..pipeline import make_pipeline
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..exceptions import SkipTestWarning
from ..model_selection import train_test_split
from ..model_selection import ShuffleSplit
from ..model_selection._validation import _safe_split
from ..metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances)
from .import shuffle
from ._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .validation import has_fit_parameter, _num_samples
from ..preprocessing import StandardScaler
from ..preprocessing import scale
from ..datasets import (
load_iris,
make_blobs,
make_multilabel_classification,
make_regression
)
REGRESSION_DATASET = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _yield_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
pairwise = _is_pairwise(estimator)
yield check_no_attributes_set_in_init
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_sample_weights_pandas_series
yield check_sample_weights_not_an_array
yield check_sample_weights_list
yield check_sample_weights_shape
if has_fit_parameter(estimator, "sample_weight") and not pairwise:
# We skip pairwise because the data is not pairwise
yield partial(check_sample_weights_invariance, kind='ones')
yield partial(check_sample_weights_invariance, kind='zeros')
yield check_estimators_fit_returns_self
yield partial(check_estimators_fit_returns_self, readonly_memmap=True)
# Check that all estimators yield informative messages when
# trained on empty datasets
if not tags["no_validation"]:
yield check_complex_data
yield check_dtype_object
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION:
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if not tags["allow_nan"] and not tags["no_validation"]:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if pairwise:
# Check that pairwise estimator throws error on non-square input
yield check_nonsquare_error
yield check_estimators_overwrite_params
if hasattr(estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
yield check_estimator_get_tags_default_keys
def _yield_classifier_checks(classifier):
tags = _safe_tags(classifier)
# test classifiers can handle non-array data and pandas objects
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_classifier_multioutput
# basic consistency testing
yield check_classifiers_train
yield partial(check_classifiers_train, readonly_memmap=True)
yield partial(check_classifiers_train, readonly_memmap=True,
X_dtype='float32')
yield check_classifiers_regression_target
if tags["multilabel"]:
yield check_classifiers_multilabel_representation_invariance
if not tags["no_validation"]:
yield check_supervised_y_no_nan
if not tags['multioutput_only']:
yield check_supervised_y_2d
if tags["requires_fit"]:
yield check_estimators_unfitted
if 'class_weight' in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=FutureWarning)
def check_supervised_y_no_nan(name, estimator_orig):
# Checks that the estimator raises an error on non-finite (NaN/inf) targets.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.full(10, np.inf)
y = _enforce_estimator_tags_y(estimator, y)
match = (
"Input contains NaN, infinity or a value too large for "
r"dtype\('float64'\)."
)
err_msg = (
f"Estimator {name} should have raised error on fitting "
"array y with NaN value."
)
with raises(ValueError, match=match, err_msg=err_msg):
estimator.fit(X, y)
def _yield_regressor_checks(regressor):
tags = _safe_tags(regressor)
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield partial(check_regressors_train, readonly_memmap=True)
yield partial(check_regressors_train, readonly_memmap=True,
X_dtype='float32')
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_regressor_multioutput
yield check_regressors_no_decision_function
if not tags["no_validation"] and not tags['multioutput_only']:
yield check_supervised_y_2d
yield check_supervised_y_no_nan
name = regressor.__class__.__name__
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if tags["requires_fit"]:
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(transformer):
tags = _safe_tags(transformer)
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not tags["no_validation"]:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
if tags["preserves_dtype"]:
yield check_transformer_preserve_dtypes
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, key="stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
name = transformer.__class__.__name__
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(clusterer):
yield check_clusterer_compute_labels_predict
name = clusterer.__class__.__name__
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield partial(check_clustering, readonly_memmap=True)
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(estimator):
# checks for outlier detectors that have a fit_predict method
if hasattr(estimator, 'fit_predict'):
yield check_outliers_fit_predict
# checks for estimators that can be used on a test set
if hasattr(estimator, 'predict'):
yield check_outliers_train
yield partial(check_outliers_train, readonly_memmap=True)
# test outlier detectors can handle non-array data
yield check_classifier_data_not_an_array
# test if NotFittedError is raised
if _safe_tags(estimator, key="requires_fit"):
yield check_estimators_unfitted
def _yield_all_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
if "2darray" not in tags["X_types"]:
warnings.warn("Can't test estimator {} which requires input "
" of type {}".format(name, tags["X_types"]),
SkipTestWarning)
return
if tags["_skip_test"]:
warnings.warn("Explicit SKIP via _skip_test tag for estimator "
"{}.".format(name),
SkipTestWarning)
return
for check in _yield_checks(estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(estimator):
yield check
if hasattr(estimator, 'transform'):
for check in _yield_transformer_checks(estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(estimator):
yield check
if is_outlier_detector(estimator):
for check in _yield_outliers_checks(estimator):
yield check
yield check_parameters_default_constructible
yield check_methods_sample_order_invariance
yield check_methods_subset_invariance
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_get_params_invariance
yield check_set_params
yield check_dict_unchanged
yield check_dont_overwrite_parameters
yield check_fit_idempotent
if not tags["no_validation"]:
yield check_n_features_in
yield check_fit1d
yield check_fit2d_predict1d
if tags["requires_y"]:
yield check_requires_y_none
if tags["requires_positive_X"]:
yield check_fit_non_negative
def _get_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyword arguments.
`_get_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`.
Returns
-------
id : str or None
See Also
--------
check_estimator
"""
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v)
for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
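# Illustrative sketch of the ids produced (output assumes the default repr
# with print_changed_only=True, as set above):
# >>> _get_check_estimator_ids(check_fit2d_1sample)
# 'check_fit2d_1sample'
# >>> _get_check_estimator_ids(partial(check_sample_weights_invariance, kind='zeros'))
# 'check_sample_weights_invariance(kind=zeros)'
# >>> _get_check_estimator_ids(LogisticRegression(C=10))
# 'LogisticRegression(C=10)'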
def _construct_instance(Estimator):
"""Construct Estimator instance if possible."""
required_parameters = getattr(Estimator, "_required_parameters", [])
if len(required_parameters):
if required_parameters in (["estimator"], ["base_estimator"]):
if issubclass(Estimator, RegressorMixin):
estimator = Estimator(Ridge())
else:
estimator = Estimator(LogisticRegression(C=1))
elif required_parameters in (['estimators'],):
# Heterogeneous ensemble classes (i.e. stacking, voting)
if issubclass(Estimator, RegressorMixin):
estimator = Estimator(estimators=[
("est1", Ridge(alpha=0.1)),
("est2", Ridge(alpha=1))
])
else:
estimator = Estimator(estimators=[
("est1", LogisticRegression(C=0.1)),
("est2", LogisticRegression(C=1))
])
else:
msg = (f"Can't instantiate estimator {Estimator.__name__} "
f"parameters {required_parameters}")
# raise additional warning to be shown by pytest
warnings.warn(msg, SkipTestWarning)
raise SkipTest(msg)
else:
estimator = Estimator()
return estimator
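# Illustrative sketch (hypothetical usage): meta-estimators whose only
# required parameter is a base estimator are instantiated with Ridge (for
# regressors) or LogisticRegression (for classifiers):
# >>> from sklearn.multioutput import MultiOutputRegressor
# >>> est = _construct_instance(MultiOutputRegressor)
# >>> type(est.estimator).__name__
# 'Ridge'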
def _maybe_mark_xfail(estimator, check, pytest):
# Mark (estimator, check) pairs as XFAIL if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_skip(), but this one is used by
# @parametrize_with_checks() instead of check_estimator()
should_be_marked, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_marked:
return estimator, check
else:
return pytest.param(estimator, check,
marks=pytest.mark.xfail(reason=reason))
def _maybe_skip(estimator, check):
# Wrap a check so that it's skipped if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_mark_xfail(), but this one is used by
# check_estimator() instead of @parametrize_with_checks which requires
# pytest
should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_skipped:
return check
check_name = (check.func.__name__ if isinstance(check, partial)
else check.__name__)
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(
f"Skipping {check_name} for {estimator.__class__.__name__}: "
f"{reason}"
)
return wrapped
def _should_be_skipped_or_marked(estimator, check):
# Return whether a check should be skipped (when using check_estimator())
# or marked as XFAIL (when using @parametrize_with_checks()), along with a
# reason.
# Currently, a check should be skipped or marked if
# the check is in the _xfail_checks tag of the estimator
check_name = (check.func.__name__ if isinstance(check, partial)
else check.__name__)
xfail_checks = _safe_tags(estimator, key='_xfail_checks') or {}
if check_name in xfail_checks:
return True, xfail_checks[check_name]
return False, 'placeholder reason that will never be used'
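# Illustrative sketch (hypothetical estimator): a check is xfailed/skipped by
# listing it in the estimator's "_xfail_checks" tag together with a reason:
#
#     class MyEstimator(BaseEstimator):
#         def _more_tags(self):
#             return {"_xfail_checks": {
#                 "check_methods_subset_invariance":
#                     "predictions are not invariant on subsets"}}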
def parametrize_with_checks(estimators):
"""Pytest specific decorator for parametrizing estimator checks.
The `id` of each check is set to be a pprint version of the estimator
and the name of the check with its keyword arguments.
This allows using `pytest -k` to specify which tests to run::
pytest test_check_estimators.py -k check_estimators_fit_returns_self
Parameters
----------
estimators : list of estimator instances
Estimators to generate checks for.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24. Pass an instance instead.
.. versionadded:: 0.24
Returns
-------
decorator : `pytest.mark.parametrize`
Examples
--------
>>> from sklearn.utils.estimator_checks import parametrize_with_checks
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeRegressor
>>> @parametrize_with_checks([LogisticRegression(),
... DecisionTreeRegressor()])
... def test_sklearn_compatible_estimator(estimator, check):
... check(estimator)
"""
import pytest
if any(isinstance(est, type) for est in estimators):
msg = ("Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead.")
raise TypeError(msg)
def checks_generator():
for estimator in estimators:
name = type(estimator).__name__
for check in _yield_all_checks(estimator):
check = partial(check, name)
yield _maybe_mark_xfail(estimator, check, pytest)
return pytest.mark.parametrize("estimator, check", checks_generator(),
ids=_get_check_estimator_ids)
def check_estimator(Estimator, generate_only=False):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc, making sure that the estimator complies with `scikit-learn`
conventions as detailed in :ref:`rolling_your_own_estimator`.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Setting `generate_only=True` returns a generator that yields (estimator,
check) tuples where each check can be called independently of the others,
i.e. `check(estimator)`. This allows all checks to be run independently and
the failing checks to be reported.
scikit-learn provides a pytest specific decorator,
:func:`~sklearn.utils.parametrize_with_checks`, making it easier to test
multiple estimators.
Parameters
----------
Estimator : estimator object
Estimator instance to check.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24.
generate_only : bool, default=False
When `False`, checks are evaluated when `check_estimator` is called.
When `True`, `check_estimator` returns a generator that yields
(estimator, check) tuples. The check is run by calling
`check(estimator)`.
.. versionadded:: 0.22
Returns
-------
checks_generator : generator
Generator that yields (estimator, check) tuples. Returned when
`generate_only=True`.
"""
if isinstance(Estimator, type):
msg = ("Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead.")
raise TypeError(msg)
estimator = Estimator
name = type(estimator).__name__
def checks_generator():
for check in _yield_all_checks(estimator):
check = _maybe_skip(estimator, check)
yield estimator, partial(check, name)
if generate_only:
return checks_generator()
for estimator, check in checks_generator():
try:
check(estimator)
except SkipTest as exception:
# SkipTest is thrown when pandas can't be imported, or by checks
# that are in the xfail_checks tag
warnings.warn(str(exception), SkipTestWarning)
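# Illustrative usage sketch of the generate_only mode documented above:
# >>> from sklearn.utils.estimator_checks import check_estimator
# >>> from sklearn.linear_model import LogisticRegression
# >>> for estimator, check in check_estimator(LogisticRegression(),
# ...                                          generate_only=True):
# ...     check(estimator)  # each check can be run (and fail) on its own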
def _regression_dataset():
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
X, y = make_regression(
n_samples=200, n_features=10, n_informative=1,
bias=5.0, noise=20, random_state=42,
)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = X, y
return REGRESSION_DATASET
def _set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
name = estimator.__class__.__name__
if ("n_iter" in params and name != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
# FIXME : init should be removed in 1.1
estimator.set_params(max_iter=500, init='nndsvda')
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if name == 'TruncatedSVD':
# TruncatedSVD doesn't run with n_components = n_features
# This is ugly :-/
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = min(estimator.n_clusters, 2)
if hasattr(estimator, "n_best"):
estimator.n_best = 1
if name == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if name == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the Johnson-Lindenstrauss lemma and the often very small number of
# samples, the number of components of the random matrix projection will
# probably be greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10,
# which is more features than we have in most cases.
estimator.set_params(k=1)
if name in ('HistGradientBoostingClassifier',
'HistGradientBoostingRegressor'):
# The default min_samples_leaf (20) isn't appropriate for the small
# datasets that the checks use (only very shallow trees would be built).
estimator.set_params(min_samples_leaf=5)
if name == 'DummyClassifier':
# the default strategy prior would output constant predictions and fail
# for check_classifiers_predictions
estimator.set_params(strategy='stratified')
# Speed-up by reducing the number of CV or splits for CV estimators
loo_cv = ['RidgeCV']
if name not in loo_cv and hasattr(estimator, 'cv'):
estimator.set_params(cv=3)
if hasattr(estimator, 'n_splits'):
estimator.set_params(n_splits=3)
if name == 'OneHotEncoder':
estimator.set_params(handle_unknown='ignore')
if name in CROSS_DECOMPOSITION:
estimator.set_params(n_components=1)
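# Illustrative sketch of the in-place speed-ups applied above (hypothetical
# call, assuming the KMeans defaults):
# >>> from sklearn.cluster import KMeans
# >>> km = KMeans(n_clusters=8)
# >>> _set_checking_parameters(km)
# >>> km.n_clusters, km.n_init, km.max_iter
# (2, 2, 5)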
class _NotAnArray:
"""An object that is convertible to an array.
Parameters
----------
data : array-like
The data.
"""
def __init__(self, data):
self.data = np.asarray(data)
def __array__(self, dtype=None):
return self.data
def __array_function__(self, func, types, args, kwargs):
if func.__name__ == "may_share_memory":
return True
raise TypeError("Don't want to call array_function {}!".format(
func.__name__))
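# Illustrative sketch: _NotAnArray is array-like but is not an ndarray, which
# is what the *_not_an_array checks rely on:
# >>> wrapped = _NotAnArray([[0.0, 1.0], [1.0, 0.0]])
# >>> isinstance(wrapped, np.ndarray)
# False
# >>> np.asarray(wrapped).shape
# (2, 2)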
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if the estimator's `metric` parameter is 'precomputed', False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == 'precomputed')
def _pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):
if _is_pairwise_metric(estimator):
return pairwise_distances(X, metric='euclidean')
if _is_pairwise(estimator):
return kernel(X, X)
return X
def _generate_sparse_matrix(X_csr):
"""Generate sparse matrices with {32,64}bit indices of diverse format.
Parameters
----------
X_csr: CSR Matrix
Input matrix in CSR format.
Returns
-------
out: iterator of (format_name, matrix) pairs
Formats are ['csr', 'dok', 'lil', 'dia', 'bsr', 'csc', 'coo',
'coo_64', 'csc_64', 'csr_64'].
"""
assert X_csr.format == 'csr'
yield 'csr', X_csr.copy()
for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']:
yield sparse_format, X_csr.asformat(sparse_format)
# Generate matrices with large (int64) indices
X_coo = X_csr.asformat('coo')
X_coo.row = X_coo.row.astype('int64')
X_coo.col = X_coo.col.astype('int64')
yield "coo_64", X_coo
for sparse_format in ['csc', 'csr']:
X = X_csr.asformat(sparse_format)
X.indices = X.indices.astype('int64')
X.indptr = X.indptr.astype('int64')
yield sparse_format + "_64", X
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = _pairwise_estimator_convert_X(X, estimator_orig)
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(int)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
tags = _safe_tags(estimator_orig)
for matrix_format, X in _generate_sparse_matrix(X_csr):
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
if name in ['Scaler', 'StandardScaler']:
estimator.set_params(with_mean=False)
# fit and predict
if "64" in matrix_format:
err_msg = (
f"Estimator {name} doesn't seem to support {matrix_format} "
"matrix, and is not failing gracefully, e.g. by using "
"check_array(X, accept_large_sparse=False)"
)
else:
err_msg = (
f"Estimator {name} doesn't seem to fail gracefully on sparse "
"data: error message should state explicitly that sparse "
"input is not supported if this is not the case."
)
with raises(
(TypeError, ValueError),
match=["sparse", "Sparse"],
may_pass=True,
err_msg=err_msg,
):
with ignore_warnings(category=FutureWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
if tags['multioutput_only']:
assert pred.shape == (X.shape[0], 1)
else:
assert pred.shape == (X.shape[0],)
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
if tags['binary_only']:
expected_probs_shape = (X.shape[0], 2)
else:
expected_probs_shape = (X.shape[0], 4)
assert probs.shape == expected_probs_shape
@ignore_warnings(category=FutureWarning)
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4],
[3, 1], [3, 2], [3, 3], [3, 4]])
X = pd.DataFrame(_pairwise_estimator_convert_X(X, estimator_orig))
y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = pd.Series([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = pd.DataFrame(y)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_not_an_array(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type _NotAnArray in the 'fit' function.
estimator = clone(estimator_orig)
if has_fit_parameter(estimator, "sample_weight"):
X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],
[2, 1], [2, 2], [2, 3], [2, 4],
[3, 1], [3, 2], [3, 3], [3, 4]])
X = _NotAnArray(_pairwise_estimator_convert_X(X, estimator_orig))
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = _NotAnArray([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = _NotAnArray(y.data.reshape(-1, 1))
estimator.fit(X, y, sample_weight=weights)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
if has_fit_parameter(estimator_orig, "sample_weight"):
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)),
estimator_orig)
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator, y)
sample_weight = [3] * n_samples
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=FutureWarning)
def check_sample_weights_shape(name, estimator_orig):
# check that estimators raise an error if sample_weight
# shape mismatches the input
if (has_fit_parameter(estimator_orig, "sample_weight") and
not _is_pairwise(estimator_orig)):
estimator = clone(estimator_orig)
X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
[2, 1], [2, 1], [2, 1], [2, 1],
[3, 3], [3, 3], [3, 3], [3, 3],
[4, 1], [4, 1], [4, 1], [4, 1]])
y = np.array([1, 1, 1, 1, 2, 2, 2, 2,
1, 1, 1, 1, 2, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y, sample_weight=np.ones(len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones(2 * len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones((len(y), 2)))
@ignore_warnings(category=FutureWarning)
def check_sample_weights_invariance(name, estimator_orig, kind="ones"):
# For kind="ones" check that the estimators yield same results for
# unit weights and no weights
# For kind="zeros" check that setting sample_weight to 0 is equivalent
# to removing corresponding samples.
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X1 = np.array([[1, 3], [1, 3], [1, 3], [1, 3],
[2, 1], [2, 1], [2, 1], [2, 1],
[3, 3], [3, 3], [3, 3], [3, 3],
[4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)
y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2,
1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if kind == 'ones':
X2 = X1
y2 = y1
sw2 = np.ones(shape=len(y1))
err_msg = (f"For {name} sample_weight=None is not equivalent to "
f"sample_weight=ones")
elif kind == 'zeros':
# Construct a dataset that is very different to (X, y) if weights
# are disregarded, but identical to (X, y) given weights.
X2 = np.vstack([X1, X1 + 1])
y2 = np.hstack([y1, 3 - y1])
sw2 = np.ones(shape=len(y1) * 2)
sw2[len(y1):] = 0
X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0)
err_msg = (f"For {name}, a zero sample_weight is not equivalent "
f"to removing the sample")
else: # pragma: no cover
raise ValueError
y1 = _enforce_estimator_tags_y(estimator1, y1)
y2 = _enforce_estimator_tags_y(estimator2, y2)
estimator1.fit(X1, y=y1, sample_weight=None)
estimator2.fit(X2, y=y2, sample_weight=sw2)
for method in ["predict", "predict_proba",
"decision_function", "transform"]:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X1)
X_pred2 = getattr(estimator2, method)(X1)
assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)
@ignore_warnings(category=(FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = _pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)
X = X.astype(object)
tags = _safe_tags(estimator_orig)
y = (X[:, 0] * 4).astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
with raises(Exception, match="Unknown label type", may_pass=True):
estimator.fit(X, y.astype(object))
if 'string' not in tags['X_types']:
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string.* number"
with raises(TypeError, match=msg):
estimator.fit(X, y)
else:
# Estimators supporting string data will not call np.asarray to convert
# the data to numeric, and therefore the error will not be raised.
# Checking the dtype of each element in the input array would be costly.
# Refer to #11401 for full discussion.
estimator.fit(X, y)
def check_complex_data(name, estimator_orig):
# check that estimators raise an exception on providing complex data
X = np.random.sample(10) + 1j * np.random.sample(10)
X = X.reshape(-1, 1)
y = np.random.sample(10) + 1j * np.random.sample(10)
estimator = clone(estimator_orig)
with raises(ValueError, match="Complex data not supported"):
estimator.fit(X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
# This estimator raises:
# ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert estimator.__dict__ == dict_before, (
'Estimator changes __dict__ during %s' % method)
def _is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
@ignore_warnings(category=FutureWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if _is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert not attrs_added_by_fit, (
'Estimator adds public attribute(s) during' ' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added'
% ', '.join(attrs_added_by_fit))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert not attrs_changed_by_fit, (
'Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed'
% ', '.join(attrs_changed_by_fit))
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
def _apply_on_subsets(func, X):
# apply function on the whole set and on mini batches
result_full = func(X)
n_features = X.shape[1]
result_by_batch = [func(batch.reshape(1, n_features))
for batch in X]
# func can output tuple (e.g. score_samples)
if type(result_full) == tuple:
result_full = result_full[0]
result_by_batch = list(map(lambda x: x[0], result_by_batch))
if sparse.issparse(result_full):
result_full = result_full.A
result_by_batch = [x.A for x in result_by_batch]
return np.ravel(result_full), np.ravel(result_by_batch)
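# Illustrative sketch: both returned arrays should match whenever the applied
# function treats rows independently:
# >>> X_demo = np.arange(6.0).reshape(3, 2)
# >>> full, by_batch = _apply_on_subsets(lambda x: x.sum(axis=1), X_demo)
# >>> np.allclose(full, by_batch)
# True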
@ignore_warnings(category=FutureWarning)
def check_methods_subset_invariance(name, estimator_orig):
# check that methods give invariant results whether applied
# to mini batches or to the whole set
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"score_samples", "predict_proba"]:
msg = ("{method} of {name} is not invariant when applied "
"to a subset.").format(method=method, name=name)
if hasattr(estimator, method):
result_full, result_by_batch = _apply_on_subsets(
getattr(estimator, method), X)
assert_allclose(result_full, result_by_batch,
atol=1e-7, err_msg=msg)
@ignore_warnings(category=FutureWarning)
def check_methods_sample_order_invariance(name, estimator_orig):
# check that methods give invariant results when applied
# to a subset with a different sample order
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int64)
if _safe_tags(estimator_orig, key='binary_only'):
y[y == 2] = 1
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 2
set_random_state(estimator, 1)
estimator.fit(X, y)
idx = np.random.permutation(X.shape[0])
for method in ["predict", "transform", "decision_function",
"score_samples", "predict_proba"]:
msg = ("{method} of {name} is not invariant when applied to a dataset"
"with different sample order.").format(method=method, name=name)
if hasattr(estimator, method):
assert_allclose_dense_sparse(getattr(estimator, method)(X)[idx],
getattr(estimator, method)(X[idx]),
atol=1e-9,
err_msg=msg)
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
# Check that fitting a 2d array with only one sample either works or
# returns an informative message. The error message should either mention
# the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
# min_cluster_size cannot be less than the data size for OPTICS.
if name == 'OPTICS':
estimator.set_params(min_samples=1)
msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample",
"1 class", "one class"]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
# check fitting a 2d array with only 1 feature either works or returns
# informative message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == 'RandomizedLogisticRegression':
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == 'RANSACRegressor':
estimator.residual_threshold = 0.5
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator, 1)
msgs = [r"1 feature\(s\)", "n_features = 1", "n_features=1"]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
with raises(ValueError):
estimator.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_general(name, transformer, readonly_memmap=False):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
X = _pairwise_estimator_convert_X(X, transformer)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
_check_transformer(name, transformer, X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
X = _pairwise_estimator_convert_X(X, transformer)
this_X = _NotAnArray(X)
this_y = _NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
# try the same with some list
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=FutureWarning)
def check_transformers_unfitted(name, transformer):
X, y = _regression_dataset()
transformer = clone(transformer)
with raises(
(AttributeError, ValueError),
err_msg="The unfitted "
f"transformer {name} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform.",
):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
if isinstance(X, _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on a non-fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
# check for consistent n_samples
assert X_pred.shape[0] == n_samples
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if _safe_tags(transformer_orig, key='non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred, x_pred2, atol=1e-2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer)
assert_allclose_dense_sparse(
x_pred, x_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
else:
assert_allclose_dense_sparse(
X_pred, X_pred2,
err_msg="fit_transform and transform outcomes "
"not consistent in %s"
% transformer, atol=1e-2)
assert_allclose_dense_sparse(
X_pred, X_pred3, atol=1e-2,
err_msg="consecutive fit_transform outcomes "
"not consistent in %s"
% transformer)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
# raises error on malformed input for transform
if hasattr(X, 'shape') and \
not _safe_tags(transformer, key="stateless") and \
X.ndim == 2 and X.shape[1] > 1:
# If it's not an array, it does not have a 'T' property
with raises(
ValueError,
err_msg=f"The transformer {name} does not raise an error "
"when the number of features in transform is different from "
"the number of features in fit."
):
transformer.transform(X[:, :-1])
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if _safe_tags(estimator_orig, key='non_deterministic'):
msg = name + ' is non deterministic'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = np.arange(n_samples) % 3
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert args[1] in ["y", "Y"], (
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_32 = _pairwise_estimator_convert_X(X_train_32, estimator_orig)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = _enforce_estimator_tags_y(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_transformer_preserve_dtypes(name, transformer_orig):
# check that dtypes are preserved: if input X is of some dtype,
# X_transformed should be of the same dtype.
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X -= X.min()
X = _pairwise_estimator_convert_X(X, transformer_orig)
for dtype in _safe_tags(transformer_orig, key="preserves_dtype"):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans = transformer.fit_transform(X_cast, y)
if isinstance(X_trans, tuple):
# cross-decomposition returns a tuple of (x_scores, y_scores)
# when given y with fit_transform; only check the first element
X_trans = X_trans[0]
# check that the output dtype is preserved
assert X_trans.dtype == dtype, (
f'Estimator transform dtype: {X_trans.dtype} - '
f'original/expected dtype: {dtype.__name__}'
)
@ignore_warnings(category=FutureWarning)
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
err_msg = (
f"The estimator {name} does not raise an error when an "
"empty data is used to train. Perhaps use check_array in train."
)
with raises(ValueError, err_msg=err_msg):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(12, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = _enforce_estimator_tags_y(
e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
)
msg = (
r"0 feature\(s\) \(shape=\(\d*, 0\)\) while a minimum of \d* "
"is required."
)
with raises(ValueError, match=msg):
e.fit(X_zero_features, y)
@ignore_warnings(category=FutureWarning)
def check_estimators_nan_inf(name, estimator_orig):
# Checks that the estimator rejects X containing NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = _pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),
estimator_orig)
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = _enforce_estimator_tags_y(estimator_orig, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
with raises(
ValueError, match=["inf", "NaN"], err_msg=error_string_fit
):
estimator.fit(X_train, y)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_predict,
):
estimator.predict(X_train)
# transform
if hasattr(estimator, "transform"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_transform,
):
estimator.transform(X_train)
@ignore_warnings
def check_nonsquare_error(name, estimator_orig):
"""Test that error is thrown when non-square data provided."""
X, y = make_blobs(n_samples=20, n_features=10)
estimator = clone(estimator_orig)
with raises(
ValueError,
err_msg=f"The pairwise estimator {name} does not raise an error "
"on non-square data",
):
estimator.fit(X, y)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators."""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle feature values less than 0
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
tags = _safe_tags(estimator_orig)
# include NaN values when the estimator should deal with them
if tags['allow_nan']:
# set randomly 10 elements to np.nan
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith('sklearn.') and not (
"test_" in module_name or module_name.endswith("_testing")
):
# strict check for sklearn estimators that are not implemented in test
# modules.
assert b"version" in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
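# A minimal sketch (illustrative only, not part of the original suite) of the
# pickle round-trip contract checked above: predictions computed before and
# after pickling must match. The helper name is hypothetical.
def _demo_pickle_roundtrip():
    import pickle
    import numpy as np
    from sklearn.linear_model import LogisticRegression
    rng = np.random.RandomState(0)
    X, y = rng.rand(30, 4), rng.randint(0, 2, size=30)
    clf = LogisticRegression().fit(X, y)
    clf2 = pickle.loads(pickle.dumps(clf))
    # the unpickled estimator reproduces the original predictions
    np.testing.assert_allclose(clf.predict_proba(X), clf2.predict_proba(X))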
@ignore_warnings(category=FutureWarning)
def check_estimators_partial_fit_n_features(name, estimator_orig):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(estimator_orig, 'partial_fit'):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
y = _enforce_estimator_tags_y(estimator_orig, y)
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with raises(
ValueError,
err_msg=f"The estimator {name} does not raise an error when the "
"number of features changes between calls to partial_fit.",
):
estimator.partial_fit(X[:, :-1], y)
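# Illustrative sketch (hypothetical helper, not part of the original suite):
# SGDClassifier.partial_fit must see a constant number of features, so
# feeding fewer columns on the second call raises ValueError.
def _demo_partial_fit_n_features():
    import numpy as np
    from sklearn.linear_model import SGDClassifier
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    y = rng.randint(0, 2, size=20)
    est = SGDClassifier(random_state=0)
    est.partial_fit(X, y, classes=np.array([0, 1]))
    try:
        est.partial_fit(X[:, :-1], y)
    except ValueError:
        pass  # expected: the number of features changed between calls
    else:
        raise AssertionError("a ValueError was expected here")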
@ignore_warnings(category=FutureWarning)
def check_classifier_multioutput(name, estimator):
n_samples, n_labels, n_classes = 42, 5, 3
tags = _safe_tags(estimator)
estimator = clone(estimator)
X, y = make_multilabel_classification(random_state=42,
n_samples=n_samples,
n_labels=n_labels,
n_classes=n_classes)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples, n_classes), (
"The shape of the prediction for multioutput data is "
"incorrect. Expected {}, got {}."
        .format((n_samples, n_classes), y_pred.shape))
assert y_pred.dtype.kind == 'i'
if hasattr(estimator, "decision_function"):
decision = estimator.decision_function(X)
assert isinstance(decision, np.ndarray)
assert decision.shape == (n_samples, n_classes), (
"The shape of the decision function output for "
"multioutput data is incorrect. Expected {}, got {}."
.format((n_samples, n_classes), decision.shape))
dec_pred = (decision > 0).astype(int)
dec_exp = estimator.classes_[dec_pred]
assert_array_equal(dec_exp, y_pred)
if hasattr(estimator, "predict_proba"):
y_prob = estimator.predict_proba(X)
if isinstance(y_prob, list) and not tags['poor_score']:
for i in range(n_classes):
assert y_prob[i].shape == (n_samples, 2), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}."
.format((n_samples, 2), y_prob[i].shape))
assert_array_equal(
np.argmax(y_prob[i], axis=1).astype(int),
y_pred[:, i]
)
elif not tags['poor_score']:
assert y_prob.shape == (n_samples, n_classes), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}."
.format((n_samples, n_classes), y_prob.shape))
assert_array_equal(y_prob.round().astype(int), y_pred)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
for i in range(n_classes):
y_proba = estimator.predict_proba(X)[:, i]
y_decision = estimator.decision_function(X)
assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i]))
@ignore_warnings(category=FutureWarning)
def check_regressor_multioutput(name, estimator):
estimator = clone(estimator)
n_samples = n_features = 10
if not _is_pairwise_metric(estimator):
n_samples = n_samples + 1
X, y = make_regression(random_state=42, n_targets=5,
n_samples=n_samples, n_features=n_features)
X = _pairwise_estimator_convert_X(X, estimator)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.dtype == np.dtype('float64'), (
"Multioutput predictions by a regressor are expected to be"
" floating-point precision. Got {} instead".format(y_pred.dtype))
assert y_pred.shape == y.shape, (
"The shape of the prediction for multioutput data is incorrect."
" Expected {}, got {}.")
@ignore_warnings(category=FutureWarning)
def check_clustering(name, clusterer_orig, readonly_memmap=False):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
rng = np.random.RandomState(7)
X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
if readonly_memmap:
X, y, X_noise = create_memmap_backed_data([X, y, X_noise])
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == 'AffinityPropagation':
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert pred.shape == (n_samples,)
assert adjusted_rand_score(pred, y) > 0.4
if _safe_tags(clusterer, key='non_deterministic'):
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert pred.dtype in [np.dtype('int32'), np.dtype('int64')]
assert pred2.dtype in [np.dtype('int32'), np.dtype('int64')]
# Add noise to X to test the possible values of the labels
labels = clusterer.fit_predict(X_noise)
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
labels_sorted = np.unique(labels)
assert_array_equal(labels_sorted, np.arange(labels_sorted[0],
labels_sorted[-1] + 1))
# Labels are expected to start at 0 (no noise) or -1 (if noise)
assert labels_sorted[0] in [0, -1]
    # Labels should be no greater than n_clusters - 1
if hasattr(clusterer, 'n_clusters'):
n_clusters = getattr(clusterer, 'n_clusters')
assert n_clusters - 1 >= labels_sorted[-1]
# else labels should be less than max(labels_) which is necessarily true
@ignore_warnings(category=FutureWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels."""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
set_random_state(clusterer)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=FutureWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
classifier = clone(classifier_orig)
with raises(
ValueError, match="class", may_pass=True, err_msg=error_string_fit
) as cm:
classifier.fit(X_train, y)
if cm.raised_and_matched:
# ValueError was raised with proper error message
return
assert_array_equal(
classifier.predict(X_test), y, err_msg=error_string_predict
)
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(
name, classifier_orig, readonly_memmap=False, X_dtype="float64"
):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m = X_m.astype(X_dtype)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB',
'CategoricalNB']:
X_m -= X_m.min()
X_b -= X_b.min()
if readonly_memmap:
X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])
problems = [(X_b, y_b)]
tags = _safe_tags(classifier_orig)
if not tags['binary_only']:
problems.append((X_m, y_m))
for (X, y) in problems:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
X = _pairwise_estimator_convert_X(X, classifier)
y = _enforce_estimator_tags_y(classifier, y)
set_random_state(classifier)
# raises error on malformed input for fit
if not tags["no_validation"]:
with raises(
ValueError,
err_msg=f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number "
"of training examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit.",
):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert hasattr(classifier, "classes_")
y_pred = classifier.predict(X)
assert y_pred.shape == (n_samples,)
# training set performance
if not tags['poor_score']:
assert accuracy_score(y, y_pred) > 0.83
# raises error on malformed input for predict
msg_pairwise = (
"The classifier {} does not raise an error when shape of X in "
" {} is not equal to (n_test_samples, n_training_samples)")
msg = ("The classifier {} does not raise an error when the number of "
"features in {} is different from the number of features in "
"fit.")
if not tags["no_validation"]:
if _is_pairwise(classifier):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict"),
):
classifier.predict(X.reshape(-1, 1))
else:
with raises(ValueError, err_msg=msg.format(name, "predict")):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
if not tags["multioutput_only"]:
assert decision.shape == (n_samples,)
else:
assert decision.shape == (n_samples, 1)
dec_pred = (decision.ravel() > 0).astype(int)
assert_array_equal(dec_pred, y_pred)
else:
assert decision.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
if not tags["no_validation"]:
if _is_pairwise(classifier):
with raises(
ValueError,
err_msg=msg_pairwise.format(
name, "decision_function"
),
):
classifier.decision_function(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "decision_function"),
):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert y_prob.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
if not tags["no_validation"]:
# raises error on malformed input for predict_proba
if _is_pairwise(classifier_orig):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict_proba"),
):
classifier.predict_proba(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "predict_proba"),
):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
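# A standalone sketch (not part of the original checks) of the binary
# decision_function/predict agreement asserted above: samples with a positive
# decision value map to classes_[1]. The helper name is hypothetical.
def _demo_decision_function_agrees_with_predict():
    import numpy as np
    from sklearn.datasets import make_blobs
    from sklearn.svm import LinearSVC
    X, y = make_blobs(n_samples=50, centers=2, random_state=0)
    clf = LinearSVC(random_state=0).fit(X, y)
    decision = clf.decision_function(X)          # shape (n_samples,)
    dec_pred = clf.classes_[(decision > 0).astype(int)]
    np.testing.assert_array_equal(dec_pred, clf.predict(X))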
def check_outlier_corruption(num_outliers, expected_outliers, decision):
    # Check that a deviation from the precise given contamination level can
    # be explained by ties in the anomaly scores.
if num_outliers < expected_outliers:
start = num_outliers
end = expected_outliers + 1
else:
start = expected_outliers
end = num_outliers + 1
# ensure that all values in the 'critical area' are tied,
# leading to the observed discrepancy between provided
# and actual contamination levels.
sorted_decision = np.sort(decision)
msg = ('The number of predicted outliers is not equal to the expected '
'number of outliers and this difference is not explained by the '
'number of ties in the decision_function values')
assert len(np.unique(sorted_decision[start:end])) == 1, msg
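# Illustrative usage (hypothetical data, not part of the original module) of
# check_outlier_corruption above: a contamination level predicting 2 outliers
# out of 6 samples, but three scores tied exactly at the threshold leave only
# one sample flagged; the helper passes because the gap is explained by ties.
def _demo_outlier_corruption():
    import numpy as np
    decision = np.array([-1.0, 0.0, 0.0, 0.0, 2.0, 3.0])
    num_outliers = int(np.sum(decision < 0))      # 1 observed outlier
    check_outlier_corruption(num_outliers, expected_outliers=2,
                             decision=decision)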
def check_outliers_train(name, estimator_orig, readonly_memmap=True):
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
if readonly_memmap:
X = create_memmap_backed_data(X)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X)
# with lists
estimator.fit(X.tolist())
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == 'i'
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
decision = estimator.decision_function(X)
scores = estimator.score_samples(X)
for output in [decision, scores]:
assert output.dtype == np.dtype('float')
assert output.shape == (n_samples,)
# raises error on malformed input for predict
with raises(ValueError):
estimator.predict(X.T)
# decision_function agrees with predict
dec_pred = (decision >= 0).astype(int)
dec_pred[dec_pred == 0] = -1
assert_array_equal(dec_pred, y_pred)
# raises error on malformed input for decision_function
with raises(ValueError):
estimator.decision_function(X.T)
# decision_function is a translation of score_samples
y_dec = scores - estimator.offset_
assert_allclose(y_dec, decision)
# raises error on malformed input for score_samples
with raises(ValueError):
estimator.score_samples(X.T)
# contamination parameter (not for OneClassSVM which has the nu parameter)
if (hasattr(estimator, 'contamination')
and not hasattr(estimator, 'novelty')):
        # proportion of outliers equal to contamination parameter when not
        # set to 'auto'. This holds for the training set and thus cannot be
        # checked in the same way for estimators with a novelty parameter,
        # such as LocalOutlierFactor (tested in check_outliers_fit_predict)
expected_outliers = 30
contamination = expected_outliers / n_samples
estimator.set_params(contamination=contamination)
estimator.fit(X)
y_pred = estimator.predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method, i.e. all estimators except LOF which is already
# excluded from this if branch.
if num_outliers != expected_outliers:
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
        # raises error when contamination is a scalar and not in (0, 0.5]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit(X)
@ignore_warnings(category=(FutureWarning))
def check_classifiers_multilabel_representation_invariance(
name, classifier_orig
):
X, y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=5, n_labels=3,
length=50, allow_unlabeled=True,
random_state=0)
X_train, y_train = X[:80], y[:80]
X_test = X[80:]
y_train_list_of_lists = y_train.tolist()
y_train_list_of_arrays = list(y_train)
classifier = clone(classifier_orig)
set_random_state(classifier)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
y_pred_list_of_lists = classifier.fit(
X_train, y_train_list_of_lists).predict(X_test)
y_pred_list_of_arrays = classifier.fit(
X_train, y_train_list_of_arrays).predict(X_test)
assert_array_equal(y_pred, y_pred_list_of_arrays)
assert_array_equal(y_pred, y_pred_list_of_lists)
assert y_pred.dtype == y_pred_list_of_arrays.dtype
assert y_pred.dtype == y_pred_list_of_lists.dtype
assert type(y_pred) == type(y_pred_list_of_arrays)
assert type(y_pred) == type(y_pred_list_of_lists)
@ignore_warnings(category=FutureWarning)
def check_estimators_fit_returns_self(
name, estimator_orig, readonly_memmap=False
):
"""Check if self is returned when calling fit."""
X, y = make_blobs(random_state=0, n_samples=21)
# some want non-negative input
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
set_random_state(estimator)
assert estimator.fit(X, y) is estimator
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
# Common test for Regressors, Classifiers and Outlier detection estimators
X, y = _regression_dataset()
estimator = clone(estimator_orig)
for method in ('decision_function', 'predict', 'predict_proba',
'predict_log_proba'):
if hasattr(estimator, method):
with raises(NotFittedError):
getattr(estimator, method)(X)
@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _pairwise_estimator_convert_X(
rnd.uniform(size=(n_samples, 3)), estimator_orig
)
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator_orig, y)
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if not tags['multioutput']:
# check that we warned if we don't support multi-output
assert len(w) > 0, msg
assert "DataConversionWarning('A column-vector y" \
" was passed when a 1d array was expected" in msg
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig):
classes = np.unique(y)
classifier = clone(classifier_orig)
if name == 'BernoulliNB':
X = X > X.mean()
set_random_state(classifier)
classifier.fit(X, y)
y_pred = classifier.predict(X)
if hasattr(classifier, "decision_function"):
decision = classifier.decision_function(X)
assert isinstance(decision, np.ndarray)
if len(classes) == 2:
dec_pred = (decision.ravel() > 0).astype(int)
dec_exp = classifier.classes_[dec_pred]
assert_array_equal(dec_exp, y_pred,
err_msg="decision_function does not match "
"classifier for %r: expected '%s', got '%s'" %
(classifier, ", ".join(map(str, dec_exp)),
", ".join(map(str, y_pred))))
elif getattr(classifier, 'decision_function_shape', 'ovr') == 'ovr':
decision_y = np.argmax(decision, axis=1).astype(int)
y_exp = classifier.classes_[decision_y]
assert_array_equal(y_exp, y_pred,
err_msg="decision_function does not match "
"classifier for %r: expected '%s', got '%s'" %
(classifier, ", ".join(map(str, y_exp)),
", ".join(map(str, y_pred))))
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
# For some specific cases 'ComplementNB' predicts less classes
# than expected
assert_array_equal(np.unique(y), np.unique(y_pred))
assert_array_equal(classes, classifier.classes_,
err_msg="Unexpected classes_ attribute for %r: "
"expected '%s', got '%s'" %
(classifier, ", ".join(map(str, classes)),
", ".join(map(str, classifier.classes_))))
def _choose_check_classifiers_labels(name, y, y_names):
    # Semisupervised classifiers use -1 as the indicator for an unlabeled
# sample.
return y if name in ["LabelPropagation",
"LabelSpreading",
"SelfTrainingClassifier"] else y_names
def check_classifiers_classes(name, classifier_orig):
X_multiclass, y_multiclass = make_blobs(n_samples=30, random_state=0,
cluster_std=0.1)
X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass,
random_state=7)
X_multiclass = StandardScaler().fit_transform(X_multiclass)
# We need to make sure that we have non negative data, for things
# like NMF
X_multiclass -= X_multiclass.min() - .1
X_binary = X_multiclass[y_multiclass != 2]
y_binary = y_multiclass[y_multiclass != 2]
X_multiclass = _pairwise_estimator_convert_X(X_multiclass, classifier_orig)
X_binary = _pairwise_estimator_convert_X(X_binary, classifier_orig)
labels_multiclass = ["one", "two", "three"]
labels_binary = ["one", "two"]
y_names_multiclass = np.take(labels_multiclass, y_multiclass)
y_names_binary = np.take(labels_binary, y_binary)
problems = [(X_binary, y_binary, y_names_binary)]
if not _safe_tags(classifier_orig, key='binary_only'):
problems.append((X_multiclass, y_multiclass, y_names_multiclass))
for X, y, y_names in problems:
for y_names_i in [y_names, y_names.astype('O')]:
y_ = _choose_check_classifiers_labels(name, y, y_names_i)
check_classifiers_predictions(X, y_, name, classifier_orig)
labels_binary = [-1, 1]
y_names_binary = np.take(labels_binary, y_binary)
y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary)
check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings(category=FutureWarning)
def check_regressors_int(name, regressor_orig):
X, _ = _regression_dataset()
X = _pairwise_estimator_convert_X(X[:50], regressor_orig)
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = _enforce_estimator_tags_y(regressor_orig, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = clone(regressor_orig)
regressor_2 = clone(regressor_orig)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(float))
pred2 = regressor_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
@ignore_warnings(category=FutureWarning)
def check_regressors_train(
name, regressor_orig, readonly_memmap=False, X_dtype=np.float64
):
X, y = _regression_dataset()
X = X.astype(X_dtype)
X = _pairwise_estimator_convert_X(X, regressor_orig)
y = scale(y) # X is already scaled
regressor = clone(regressor_orig)
y = _enforce_estimator_tags_y(regressor, y)
if name in CROSS_DECOMPOSITION:
rnd = np.random.RandomState(0)
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
if readonly_memmap:
X, y, y_ = create_memmap_backed_data([X, y, y_])
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
with raises(
ValueError,
err_msg=f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number of "
"training examples is not the same as the number of labels. Perhaps "
"use check_X_y in fit.",
):
regressor.fit(X, y[:-1])
# fit
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert y_pred.shape == y_.shape
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if not _safe_tags(regressor, key="poor_score"):
assert regressor.score(X, y_) > 0.5
@ignore_warnings
def check_regressors_no_decision_function(name, regressor_orig):
# check that regressors don't have a decision_function, predict_proba, or
# predict_log_proba method.
rng = np.random.RandomState(0)
regressor = clone(regressor_orig)
X = rng.normal(size=(10, 4))
X = _pairwise_estimator_convert_X(X, regressor_orig)
y = _enforce_estimator_tags_y(regressor, X[:, 0])
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
assert not hasattr(regressor, func_name)
@ignore_warnings(category=FutureWarning)
def check_class_weight_classifiers(name, classifier_orig):
if _safe_tags(classifier_orig, key='binary_only'):
problems = [2]
else:
problems = [2, 3]
for n_centers in problems:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# can't use gram_if_pairwise() here, setting up gram matrix manually
if _is_pairwise(classifier_orig):
X_test = rbf_kernel(X_test, X_train)
X_train = rbf_kernel(X_train, X_train)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
classifier = clone(classifier_orig).set_params(
class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
if hasattr(classifier, "n_iter_no_change"):
classifier.set_params(n_iter_no_change=20)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# XXX: Generally can use 0.89 here. On Windows, LinearSVC gets
# 0.88 (Issue #9111)
if not _safe_tags(classifier_orig, key='poor_score'):
assert np.mean(y_pred == 0) > 0.87
@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_classifiers(
name, classifier_orig, X_train, y_train, X_test, y_test, weights
):
classifier = clone(classifier_orig)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert (f1_score(y_test, y_pred_balanced, average='weighted') >
f1_score(y_test, y_pred, average='weighted'))
@ignore_warnings(category=FutureWarning)
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
# this is run on classes, not instances, though this should be changed
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small for convergence
classifier.set_params(n_iter=1000)
if hasattr(classifier, "max_iter"):
classifier.set_params(max_iter=1000)
if hasattr(classifier, 'cv'):
classifier.set_params(cv=3)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_allclose(coef_balanced, coef_manual,
err_msg="Classifier %s is not computing"
" class_weight=balanced properly."
% name)
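# A minimal sketch (illustrative only, not part of the original module) of
# the 'balanced' heuristic reproduced manually above:
# weight_c = n_samples / (n_classes * count_c), which matches
# sklearn.utils.class_weight.compute_class_weight.
def _demo_balanced_class_weight():
    import numpy as np
    from sklearn.utils.class_weight import compute_class_weight
    y = np.array([1, 1, 1, -1, -1])
    classes = np.array([-1, 1])
    counts = np.array([np.sum(y == c) for c in classes])
    manual = len(y) / (len(classes) * counts)
    auto = compute_class_weight('balanced', classes=classes, y=y)
    np.testing.assert_allclose(auto, manual)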
@ignore_warnings(category=FutureWarning)
def check_estimators_overwrite_params(name, estimator_orig):
X, y = make_blobs(random_state=0, n_samples=21)
# some want non-negative input
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert joblib.hash(new_value) == joblib.hash(original_value), (
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
@ignore_warnings(category=FutureWarning)
def check_no_attributes_set_in_init(name, estimator_orig):
"""Check setting during init."""
try:
# Clone fails if the estimator does not store
# all parameters as an attribute during init
estimator = clone(estimator_orig)
except AttributeError:
raise AttributeError(f"Estimator {name} should store all "
"parameters as an attribute during init.")
if hasattr(type(estimator).__init__, "deprecated_original"):
return
init_params = _get_args(type(estimator).__init__)
if IS_PYPY:
# __init__ signature has additional objects in PyPy
for key in ['obj']:
if key in init_params:
init_params.remove(key)
parents_init_params = [param for params_parent in
(_get_args(parent) for parent in
type(estimator).__mro__)
for param in params_parent]
# Test for no setting apart from parameters during init
invalid_attr = (set(vars(estimator)) - set(init_params)
- set(parents_init_params))
assert not invalid_attr, (
"Estimator %s should not set any attribute apart"
" from parameters during init. Found attributes %s."
% (name, sorted(invalid_attr)))
@ignore_warnings(category=FutureWarning)
def check_sparsify_coefficients(name, estimator_orig):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = _enforce_estimator_tags_y(estimator_orig, y)
est = clone(estimator_orig)
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert sparse.issparse(est.coef_)
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
@ignore_warnings(category=FutureWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1],
[0, 3], [1, 0], [2, 0], [4, 4], [2, 3], [3, 2]])
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator_orig, y)
for obj_type in ["NotAnArray", "PandasDataframe"]:
check_estimators_data_not_an_array(name, estimator_orig, X, y,
obj_type)
@ignore_warnings(category=FutureWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
X, y = _regression_dataset()
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = _enforce_estimator_tags_y(estimator_orig, y)
for obj_type in ["NotAnArray", "PandasDataframe"]:
check_estimators_data_not_an_array(name, estimator_orig, X, y,
obj_type)
@ignore_warnings(category=FutureWarning)
def check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type):
if name in CROSS_DECOMPOSITION:
raise SkipTest("Skipping check_estimators_data_not_an_array "
"for cross decomposition module as estimators "
"are not deterministic.")
# separate estimators to control random seeds
estimator_1 = clone(estimator_orig)
estimator_2 = clone(estimator_orig)
set_random_state(estimator_1)
set_random_state(estimator_2)
if obj_type not in ["NotAnArray", 'PandasDataframe']:
raise ValueError("Data type {0} not supported".format(obj_type))
if obj_type == "NotAnArray":
y_ = _NotAnArray(np.asarray(y))
X_ = _NotAnArray(np.asarray(X))
else:
# Here pandas objects (Series and DataFrame) are tested explicitly
# because some estimators may handle them (especially their indexing)
# specially.
try:
import pandas as pd
y_ = np.asarray(y)
if y_.ndim == 1:
y_ = pd.Series(y_)
else:
y_ = pd.DataFrame(y_)
X_ = pd.DataFrame(np.asarray(X))
except ImportError:
raise SkipTest("pandas is not installed: not checking estimators "
"for pandas objects.")
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)
def check_parameters_default_constructible(name, Estimator):
# test default-constructibility
# get rid of deprecation warnings
Estimator = Estimator.__class__
with ignore_warnings(category=FutureWarning):
estimator = _construct_instance(Estimator)
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert estimator.set_params() is estimator
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator."""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
        # required parameters may need a non-default argument
init_params = init_params[len(getattr(
estimator, '_required_parameters', [])):]
for init_param in init_params:
assert init_param.default != init_param.empty, (
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
allowed_types = {
str,
int,
float,
bool,
tuple,
type(None),
type,
types.FunctionType,
joblib.Memory,
}
# Any numpy numeric such as np.int32.
allowed_types.update(np.core.numerictypes.allTypes.values())
assert type(init_param.default) in allowed_types, (
f"Parameter '{init_param.name}' of estimator "
f"'{Estimator.__name__}' is of type "
f"{type(init_param.default).__name__} which is not "
f"allowed. All init parameters have to be immutable to "
f"make cloning possible. Therefore we restrict the set of "
f"legal types to "
f"{set(type.__name__ for type in allowed_types)}."
)
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert init_param.default is None, (
f"Estimator parameter '{init_param.name}' of estimator "
f"'{Estimator.__name__}' is not returned by get_params. "
f"If it is deprecated, set its default value to None."
)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
failure_text = (
f"Parameter {init_param.name} was mutated on init. All "
f"parameters must be stored unchanged."
)
if is_scalar_nan(param_value):
# Allows to set default parameters to np.nan
assert param_value is init_param.default, failure_text
else:
assert param_value == init_param.default, failure_text
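# Illustrative sketch (not part of the original module) of the
# signature-based introspection used above: hyper-parameters are the
# non-variadic __init__ arguments other than 'self', and each one must be
# reported by get_params(). The helper name is hypothetical.
def _demo_init_signature_introspection():
    from inspect import signature
    from sklearn.linear_model import Ridge
    init_params = [
        p.name for p in signature(Ridge.__init__).parameters.values()
        if p.name != 'self' and p.kind not in (p.VAR_KEYWORD,
                                               p.VAR_POSITIONAL)
    ]
    assert set(init_params) <= set(Ridge().get_params())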
def _enforce_estimator_tags_y(estimator, y):
# Estimators with a `requires_positive_y` tag only accept strictly positive
# data
if _safe_tags(estimator, key="requires_positive_y"):
# Create strictly positive y. The minimal increment above 0 is 1, as
# y could be of integer dtype.
y += 1 + abs(y.min())
# Estimators with a `binary_only` tag only accept up to two unique y values
if _safe_tags(estimator, key="binary_only") and y.size > 0:
y = np.where(y == y.flat[0], y, y.flat[0] + 1)
    # Estimators with a `multioutput_only` tag raise ValueError if y is 1-D;
    # convert y to 2-D for those estimators.
if _safe_tags(estimator, key="multioutput_only"):
return np.reshape(y, (-1, 1))
return y
def _enforce_estimator_tags_x(estimator, X):
# Pairwise estimators only accept
# X of shape (`n_samples`, `n_samples`)
if _is_pairwise(estimator):
X = X.dot(X.T)
# Estimators with `1darray` in `X_types` tag only accept
# X of shape (`n_samples`,)
if '1darray' in _safe_tags(estimator, key='X_types'):
X = X[:, 0]
# Estimators with a `requires_positive_X` tag only accept
# strictly positive data
if _safe_tags(estimator, key='requires_positive_X'):
X -= X.min()
return X
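# A small sketch (illustrative only, not part of the original module) of the
# two most common y conversions applied above: a positive shift for the
# `requires_positive_y` tag and a column reshape for `multioutput_only`.
def _demo_enforce_tags_y():
    import numpy as np
    y = np.array([-2, 0, 3])
    y_pos = y + 1 + abs(y.min())       # strictly positive targets
    assert y_pos.min() >= 1
    y_2d = np.reshape(y, (-1, 1))      # single-column 2-D targets
    assert y_2d.shape == (3, 1)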
@ignore_warnings(category=FutureWarning)
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that estimators that are not transformers and that expose a
    # max_iter parameter report an n_iter_ attribute of at least 1.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
# SelfTrainingClassifier does not perform an iteration if all samples are
# labeled, hence n_iter_ = 0 is valid.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression', 'SelfTrainingClassifier']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = clone(estimator_orig).set_params(alpha=0.)
else:
estimator = clone(estimator_orig)
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = _enforce_estimator_tags_y(estimator, y_)
set_random_state(estimator, 0)
estimator.fit(X, y_)
assert estimator.n_iter_ >= 1
@ignore_warnings(category=FutureWarning)
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers with a max_iter parameter report an
    # n_iter_ attribute of at least 1.
estimator = clone(estimator_orig)
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert iter_ >= 1
else:
assert estimator.n_iter_ >= 1
@ignore_warnings(category=FutureWarning)
def check_get_params_invariance(name, estimator_orig):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
e = clone(estimator_orig)
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert all(item in deep_params.items() for item in
shallow_params.items())
@ignore_warnings(category=FutureWarning)
def check_set_params(name, estimator_orig):
# Check that get_params() returns the same thing
# before and after set_params() with some fuzz
estimator = clone(estimator_orig)
orig_params = estimator.get_params(deep=False)
msg = "get_params result does not match what was passed to set_params"
estimator.set_params(**orig_params)
curr_params = estimator.get_params(deep=False)
assert set(orig_params.keys()) == set(curr_params.keys()), msg
for k, v in curr_params.items():
assert orig_params[k] is v, msg
# some fuzz values
test_values = [-np.inf, np.inf, None]
test_params = deepcopy(orig_params)
for param_name in orig_params.keys():
default_value = orig_params[param_name]
for value in test_values:
test_params[param_name] = value
try:
estimator.set_params(**test_params)
except (TypeError, ValueError) as e:
e_type = e.__class__.__name__
# Exception occurred, possibly parameter validation
warnings.warn("{0} occurred during set_params of param {1} on "
"{2}. It is recommended to delay parameter "
"validation until fit.".format(e_type,
param_name,
name))
change_warning_msg = "Estimator's parameters changed after " \
"set_params raised {}".format(e_type)
params_before_exception = curr_params
curr_params = estimator.get_params(deep=False)
try:
assert (set(params_before_exception.keys()) ==
set(curr_params.keys()))
for k, v in curr_params.items():
assert params_before_exception[k] is v
except AssertionError:
warnings.warn(change_warning_msg)
else:
curr_params = estimator.get_params(deep=False)
assert (set(test_params.keys()) ==
set(curr_params.keys())), msg
for k, v in curr_params.items():
assert test_params[k] is v, msg
test_params[param_name] = default_value
@ignore_warnings(category=FutureWarning)
def check_classifiers_regression_target(name, estimator_orig):
# Check if classifier throws an exception when fed regression targets
X, y = _regression_dataset()
X = X + 1 + abs(X.min(axis=0)) # be sure that X is non-negative
e = clone(estimator_orig)
msg = "Unknown label type: "
if not _safe_tags(e, key="no_validation"):
with raises(ValueError, match=msg):
e.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_decision_proba_consistency(name, estimator_orig):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
estimator = clone(estimator_orig)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X_train, y_train)
# Since the link function from decision_function() to predict_proba()
# is sometimes not precise enough (typically expit), we round to the
# 10th decimal to avoid numerical issues: we compare the rank
# with deterministic ties rather than get platform specific rank
# inversions in case of machine level differences.
a = estimator.predict_proba(X_test)[:, 1].round(decimals=10)
b = estimator.decision_function(X_test).round(decimals=10)
assert_array_equal(rankdata(a), rankdata(b))
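# A standalone sketch (not part of the original checks) of the rank-agreement
# property verified above: for LogisticRegression the positive-class
# probability is the logistic sigmoid of the decision function, a strictly
# monotone link, so both scores rank the samples identically.
def _demo_decision_proba_rank_agreement():
    import numpy as np
    from scipy.special import expit
    from scipy.stats import rankdata
    from sklearn.datasets import make_blobs
    from sklearn.linear_model import LogisticRegression
    X, y = make_blobs(n_samples=100, centers=[(2, 2), (4, 4)], random_state=0)
    clf = LogisticRegression().fit(X, y)
    decision = clf.decision_function(X)
    proba = clf.predict_proba(X)[:, 1]
    np.testing.assert_allclose(proba, expit(decision))
    np.testing.assert_array_equal(rankdata(proba.round(10)),
                                  rankdata(decision.round(10)))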
def check_outliers_fit_predict(name, estimator_orig):
# Check fit_predict for outlier detectors.
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
y_pred = estimator.fit_predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == 'i'
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
# check fit_predict = fit.predict when the estimator has both a predict and
# a fit_predict method. recall that it is already assumed here that the
# estimator has a fit_predict method
if hasattr(estimator, 'predict'):
y_pred_2 = estimator.fit(X).predict(X)
assert_array_equal(y_pred, y_pred_2)
if hasattr(estimator, "contamination"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'
expected_outliers = 30
contamination = float(expected_outliers)/n_samples
estimator.set_params(contamination=contamination)
y_pred = estimator.fit_predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method
if (num_outliers != expected_outliers and
hasattr(estimator, 'decision_function')):
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
        # raises error when contamination is a scalar and not in (0, 0.5]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, -0.001, 0.5001, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit_predict(X)
def check_fit_non_negative(name, estimator_orig):
    # Check that a proper error is raised when X contains negative values
    # and the requires_positive_X tag is present
X = np.array([[-1., 1], [-1., 1]])
y = np.array([1, 2])
estimator = clone(estimator_orig)
with raises(ValueError):
estimator.fit(X, y)
def check_fit_idempotent(name, estimator_orig):
# Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would
# check that the estimated parameters during training (e.g. coefs_) are
# the same, but having a universal comparison function for those
# attributes is difficult and full of edge cases. So instead we check that
# predict(), predict_proba(), decision_function() and transform() return
# the same results.
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params().keys():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
train, test = next(ShuffleSplit(test_size=.2, random_state=rng).split(X))
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
# Fit for the first time
estimator.fit(X_train, y_train)
result = {method: getattr(estimator, method)(X_test)
for method in check_methods
if hasattr(estimator, method)}
# Fit again
set_random_state(estimator)
estimator.fit(X_train, y_train)
for method in check_methods:
if hasattr(estimator, method):
new_result = getattr(estimator, method)(X_test)
if np.issubdtype(new_result.dtype, np.floating):
tol = 2*np.finfo(new_result.dtype).eps
else:
tol = 2*np.finfo(np.float64).eps
assert_allclose_dense_sparse(
result[method], new_result,
atol=max(tol, 1e-9), rtol=max(tol, 1e-7),
err_msg="Idempotency check failed for method {}".format(method)
)
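# Illustrative sketch (not part of the original module) of the idempotency
# property verified above: refitting from the same random state on the same
# data reproduces the predictions exactly. The helper name is hypothetical.
def _demo_fit_idempotent():
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    X, y = make_classification(n_samples=100, random_state=0)
    clf = RandomForestClassifier(n_estimators=10, random_state=0)
    proba_first = clf.fit(X, y).predict_proba(X)
    proba_second = clf.fit(X, y).predict_proba(X)
    np.testing.assert_allclose(proba_first, proba_second)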
def check_n_features_in(name, estimator_orig):
# Make sure that n_features_in_ attribute doesn't exist until fit is
# called, and that its value is correct.
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator_orig):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
assert not hasattr(estimator, 'n_features_in_')
estimator.fit(X, y)
if hasattr(estimator, 'n_features_in_'):
assert estimator.n_features_in_ == X.shape[1]
else:
warnings.warn(
"As of scikit-learn 0.23, estimators should expose a "
"n_features_in_ attribute, unless the 'no_validation' tag is "
"True. This attribute should be equal to the number of features "
"passed to the fit method. "
"An error will be raised from version 1.0 (renaming of 0.25) "
"when calling check_estimator(). "
"See SLEP010: "
"https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html", # noqa
FutureWarning
)
def check_requires_y_none(name, estimator_orig):
# Make sure that an estimator with requires_y=True fails gracefully when
# given y=None
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
n_samples = 100
X = rng.normal(loc=100, size=(n_samples, 2))
X = _pairwise_estimator_convert_X(X, estimator)
warning_msg = ("As of scikit-learn 0.23, estimators should have a "
"'requires_y' tag set to the appropriate value. "
"The default value of the tag is False. "
"An error will be raised from version 1.0 when calling "
"check_estimator() if the tag isn't properly set.")
expected_err_msgs = (
"requires y to be passed, but the target y is None",
"Expected array-like (array or non-string sequence), got None",
"y should be a 1d array"
)
try:
estimator.fit(X, None)
except ValueError as ve:
if not any(msg in str(ve) for msg in expected_err_msgs):
warnings.warn(warning_msg, FutureWarning)
def check_n_features_in_after_fitting(name, estimator_orig):
    # Make sure that n_features_in_ is checked after fitting
tags = _safe_tags(estimator_orig)
if "2darray" not in tags["X_types"] or tags["no_validation"]:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
if 'warm_start' in estimator.get_params():
estimator.set_params(warm_start=False)
n_samples = 150
X = rng.normal(size=(n_samples, 8))
X = _enforce_estimator_tags_x(estimator, X)
X = _pairwise_estimator_convert_X(X, estimator)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
assert estimator.n_features_in_ == X.shape[1]
# check methods will check n_features_in_
check_methods = ["predict", "transform", "decision_function",
"predict_proba", "score"]
X_bad = X[:, [1]]
msg = (f"X has 1 features, but \\w+ is expecting {X.shape[1]} "
"features as input")
for method in check_methods:
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == "score":
callable_method = partial(callable_method, y=y)
with raises(ValueError, match=msg):
callable_method(X_bad)
# partial_fit will check in the second call
if not hasattr(estimator, "partial_fit"):
return
estimator = clone(estimator_orig)
if is_classifier(estimator):
estimator.partial_fit(X, y, classes=np.unique(y))
else:
estimator.partial_fit(X, y)
assert estimator.n_features_in_ == X.shape[1]
with raises(ValueError, match=msg):
estimator.partial_fit(X_bad, y)
def check_estimator_get_tags_default_keys(name, estimator_orig):
# check that if _get_tags is implemented, it contains all keys from
# _DEFAULT_KEYS
estimator = clone(estimator_orig)
if not hasattr(estimator, "_get_tags"):
return
tags_keys = set(estimator._get_tags().keys())
default_tags_keys = set(_DEFAULT_TAGS.keys())
assert tags_keys.intersection(default_tags_keys) == default_tags_keys, (
f"{name}._get_tags() is missing entries for the following default tags"
f": {default_tags_keys - tags_keys.intersection(default_tags_keys)}"
)
| bsd-3-clause |
linebp/pandas | pandas/tests/io/parser/dialect.py | 20 | 2039 | # -*- coding: utf-8 -*-
"""
Tests that dialects are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import csv
from pandas import DataFrame
from pandas.compat import StringIO
from pandas.errors import ParserWarning
import pandas.util.testing as tm
class DialectTests(object):
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
with tm.assert_produces_warning(ParserWarning):
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
csv.register_dialect('mydialect', delimiter=':')
with tm.assert_produces_warning(ParserWarning):
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_invalid_dialect(self):
class InvalidDialect(object):
pass
data = 'a\n1'
msg = 'Invalid dialect'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), dialect=InvalidDialect)
def test_dialect_conflict(self):
data = 'a,b\n1,2'
dialect = 'excel'
exp = DataFrame({'a': [1], 'b': [2]})
with tm.assert_produces_warning(None):
df = self.read_csv(StringIO(data), delimiter=',', dialect=dialect)
tm.assert_frame_equal(df, exp)
with tm.assert_produces_warning(ParserWarning):
df = self.read_csv(StringIO(data), delimiter='.', dialect=dialect)
tm.assert_frame_equal(df, exp)
| bsd-3-clause |
kagayakidan/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared exponential correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared exponential correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/io/tests/test_stata.py | 7 | 56765 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
import datetime as dt
import os
import struct
import sys
import warnings
from datetime import datetime
from distutils.version import LooseVersion
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import compat
from pandas.compat import iterkeys
from pandas.core.frame import DataFrame, Series
from pandas.io.parsers import read_csv
from pandas.io.stata import (read_stata, StataReader, InvalidColumnName,
PossiblePrecisionLoss, StataMissingValue)
from pandas.tslib import NaT
from pandas.types.common import is_categorical_dtype
class TestStata(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.dta1_114 = os.path.join(self.dirpath, 'stata1_114.dta')
self.dta1_117 = os.path.join(self.dirpath, 'stata1_117.dta')
self.dta2_113 = os.path.join(self.dirpath, 'stata2_113.dta')
self.dta2_114 = os.path.join(self.dirpath, 'stata2_114.dta')
self.dta2_115 = os.path.join(self.dirpath, 'stata2_115.dta')
self.dta2_117 = os.path.join(self.dirpath, 'stata2_117.dta')
self.dta3_113 = os.path.join(self.dirpath, 'stata3_113.dta')
self.dta3_114 = os.path.join(self.dirpath, 'stata3_114.dta')
self.dta3_115 = os.path.join(self.dirpath, 'stata3_115.dta')
self.dta3_117 = os.path.join(self.dirpath, 'stata3_117.dta')
self.csv3 = os.path.join(self.dirpath, 'stata3.csv')
self.dta4_113 = os.path.join(self.dirpath, 'stata4_113.dta')
self.dta4_114 = os.path.join(self.dirpath, 'stata4_114.dta')
self.dta4_115 = os.path.join(self.dirpath, 'stata4_115.dta')
self.dta4_117 = os.path.join(self.dirpath, 'stata4_117.dta')
self.dta_encoding = os.path.join(self.dirpath, 'stata1_encoding.dta')
self.csv14 = os.path.join(self.dirpath, 'stata5.csv')
self.dta14_113 = os.path.join(self.dirpath, 'stata5_113.dta')
self.dta14_114 = os.path.join(self.dirpath, 'stata5_114.dta')
self.dta14_115 = os.path.join(self.dirpath, 'stata5_115.dta')
self.dta14_117 = os.path.join(self.dirpath, 'stata5_117.dta')
self.csv15 = os.path.join(self.dirpath, 'stata6.csv')
self.dta15_113 = os.path.join(self.dirpath, 'stata6_113.dta')
self.dta15_114 = os.path.join(self.dirpath, 'stata6_114.dta')
self.dta15_115 = os.path.join(self.dirpath, 'stata6_115.dta')
self.dta15_117 = os.path.join(self.dirpath, 'stata6_117.dta')
self.dta16_115 = os.path.join(self.dirpath, 'stata7_115.dta')
self.dta16_117 = os.path.join(self.dirpath, 'stata7_117.dta')
self.dta17_113 = os.path.join(self.dirpath, 'stata8_113.dta')
self.dta17_115 = os.path.join(self.dirpath, 'stata8_115.dta')
self.dta17_117 = os.path.join(self.dirpath, 'stata8_117.dta')
self.dta18_115 = os.path.join(self.dirpath, 'stata9_115.dta')
self.dta18_117 = os.path.join(self.dirpath, 'stata9_117.dta')
self.dta19_115 = os.path.join(self.dirpath, 'stata10_115.dta')
self.dta19_117 = os.path.join(self.dirpath, 'stata10_117.dta')
self.dta20_115 = os.path.join(self.dirpath, 'stata11_115.dta')
self.dta20_117 = os.path.join(self.dirpath, 'stata11_117.dta')
self.dta21_117 = os.path.join(self.dirpath, 'stata12_117.dta')
self.dta22_118 = os.path.join(self.dirpath, 'stata14_118.dta')
self.dta23 = os.path.join(self.dirpath, 'stata15.dta')
self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta')
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
def test_read_empty_dta(self):
empty_ds = DataFrame(columns=['unit'])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
with StataReader(self.dta1_114) as rdr:
with warnings.catch_warnings(record=True) as w: # noqa
parsed_114_data = rdr.data()
with StataReader(self.dta1_114) as rdr:
parsed_114_read = rdr.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
def test_read_dta1(self):
parsed_114 = self.read_dta(self.dta1_114)
parsed_117 = self.read_dta(self.dta1_117)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
# this is an oddity: the NaN should really be float64, but
# the casting doesn't fail, so we need to match Stata here
expected['float_miss'] = expected['float_miss'].astype(np.float32)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta2(self):
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('datetime interp under 2.6 is faulty')
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1)
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1)
),
(
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
pd.NaT,
)
],
columns=['datetime_c', 'datetime_big_c', 'date', 'weekly_date',
'monthly_date', 'quarterly_date', 'half_yearly_date',
'yearly_date']
)
expected['yearly_date'] = expected['yearly_date'].astype('O')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
self.assertEqual(len(w), 3)
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_115, expected,
check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_117, expected,
check_datetimelike_compat=True)
def test_read_dta3(self):
parsed_113 = self.read_dta(self.dta3_113)
parsed_114 = self.read_dta(self.dta3_114)
parsed_115 = self.read_dta(self.dta3_115)
parsed_117 = self.read_dta(self.dta3_117)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected['year'] = expected['year'].astype(np.int16)
expected['quarter'] = expected['quarter'].astype(np.int8)
tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected)
tm.assert_frame_equal(parsed_115, expected)
tm.assert_frame_equal(parsed_117, expected)
def test_read_dta4(self):
parsed_113 = self.read_dta(self.dta4_113)
parsed_114 = self.read_dta(self.dta4_114)
parsed_115 = self.read_dta(self.dta4_115)
parsed_117 = self.read_dta(self.dta4_117)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled'])
# these are all categoricals
expected = pd.concat([expected[col].astype('category')
for col in expected], axis=1)
# stata doesn't save .category metadata
tm.assert_frame_equal(parsed_113, expected, check_categorical=False)
tm.assert_frame_equal(parsed_114, expected, check_categorical=False)
tm.assert_frame_equal(parsed_115, expected, check_categorical=False)
tm.assert_frame_equal(parsed_117, expected, check_categorical=False)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=['x', 'y', 'z'])
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_dta18(self):
parsed_118 = self.read_dta(self.dta22_118)
parsed_118["Bytes"] = parsed_118["Bytes"].astype('O')
expected = DataFrame.from_records(
[['Cat', 'Bogota', u'Bogotá', 1, 1.0, u'option b Ünicode', 1.0],
['Dog', 'Boston', u'Uzunköprü', np.nan, np.nan, np.nan, np.nan],
['Plane', 'Rome', u'Tromsø', 0, 0.0, 'option a', 0.0],
['Potato', 'Tokyo', u'Elâzığ', -4, 4.0, 4, 4],
['', '', '', 0, 0.3332999, 'option a', 1 / 3.]
],
columns=['Things', 'Cities', 'Unicode_Cities_Strl',
'Ints', 'Floats', 'Bytes', 'Longs'])
expected["Floats"] = expected["Floats"].astype(np.float32)
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
with StataReader(self.dta22_118) as rdr:
vl = rdr.variable_labels()
vl_expected = {u'Unicode_Cities_Strl':
u'Here are some strls with Ünicode chars',
u'Longs': u'long data',
u'Things': u'Here are some things',
u'Bytes': u'byte data',
u'Ints': u'int data',
u'Cities': u'Here are some cities',
u'Floats': u'float data'}
tm.assert_dict_equal(vl, vl_expected)
self.assertEqual(rdr.data_label, u'This is a Ünicode data label')
def test_read_write_dta5(self):
original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=['float_miss', 'double_miss', 'byte_miss',
'int_miss', 'long_miss'])
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['year'] = original['year'].astype(np.int32)
original['quarter'] = original['quarter'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
def test_read_write_dta10(self):
original = DataFrame(data=[["string", "object", 1, 1.1,
np.datetime64('2003-12-25')]],
columns=['string', 'object', 'integer',
'floating', 'datetime'])
original["object"] = Series(original["object"], dtype=object)
original.index.name = 'index'
original.index = original.index.astype(np.int32)
original['integer'] = original['integer'].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {'datetime': 'tc'})
written_and_read_again = self.read_dta(path)
# original.index is np.int32, but the index read back is np.int64
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original, check_index_type=False)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5, 4), columns=list('abcd'))
df.ix[2, 'a':'c'] = np.nan
df_copy = df.copy()
with tm.ensure_clean() as path:
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
def test_encoding(self):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
if compat.PY3:
expected = raw.kreis1849[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, compat.string_types)
else:
expected = raw.kreis1849.str.decode("latin-1")[0]
self.assertEqual(result, expected)
self.assertIsInstance(result, unicode) # noqa
with tm.ensure_clean() as path:
encoded.to_stata(path, encoding='latin-1', write_index=False)
reread_encoded = read_stata(path, encoding='latin-1')
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame([(1, 2, 3, 4)],
columns=['good', compat.u('b\u00E4d'), '8number',
'astringwithmorethan32characters______'])
formatted = DataFrame([(1, 2, 3, 4)],
columns=['good', 'b_d', '_8number',
'astringwithmorethan32characters_'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
self.assertEqual(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), formatted)
def test_read_write_dta12(self):
original = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_1',
'astringwithmorethan32characters_2',
'+',
'-',
'short',
'delete'])
formatted = DataFrame([(1, 2, 3, 4, 5, 6)],
columns=['astringwithmorethan32characters_',
'_0astringwithmorethan32character',
'_',
'_1_',
'_short',
'_delete'])
formatted.index.name = 'index'
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
original.to_stata(path, None)
# should get a warning for that format.
self.assertEqual(len(w), 1)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), formatted)
def test_read_write_dta13(self):
s1 = Series(2 ** 9, dtype=np.int16)
s2 = Series(2 ** 17, dtype=np.int32)
s3 = Series(2 ** 33, dtype=np.int64)
original = DataFrame({'int16': s1, 'int32': s2, 'int64': s3})
original.index.name = 'index'
formatted = original
formatted['int64'] = formatted['int64'].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
formatted)
def test_read_write_reread_dta14(self):
expected = self.read_csv(self.csv14)
cols = ['byte_', 'int_', 'long_', 'float_', 'double_']
for col in cols:
expected[col] = expected[col]._convert(datetime=True, numeric=True)
expected['float_'] = expected['float_'].astype(np.float32)
expected['date_td'] = pd.to_datetime(
expected['date_td'], errors='coerce')
parsed_113 = self.read_dta(self.dta14_113)
parsed_113.index.name = 'index'
parsed_114 = self.read_dta(self.dta14_114)
parsed_114.index.name = 'index'
parsed_115 = self.read_dta(self.dta14_115)
parsed_115.index.name = 'index'
parsed_117 = self.read_dta(self.dta14_117)
parsed_117.index.name = 'index'
tm.assert_frame_equal(parsed_114, parsed_113)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {'date_td': 'td'})
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index('index'), parsed_114)
def test_read_write_reread_dta15(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(
datetime.strptime, args=('%Y-%m-%d',))
parsed_113 = self.read_dta(self.dta15_113)
parsed_114 = self.read_dta(self.dta15_114)
parsed_115 = self.read_dta(self.dta15_115)
parsed_117 = self.read_dta(self.dta15_117)
tm.assert_frame_equal(expected, parsed_114)
tm.assert_frame_equal(parsed_113, parsed_114)
tm.assert_frame_equal(parsed_114, parsed_115)
tm.assert_frame_equal(parsed_114, parsed_117)
def test_timestamp_and_label(self):
original = DataFrame([(1,)], columns=['var'])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = 'This is a data file.'
with tm.ensure_clean() as path:
original.to_stata(path, time_stamp=time_stamp,
data_label=data_label)
with StataReader(path) as reader:
parsed_time_stamp = dt.datetime.strptime(
reader.time_stamp, ('%d %b %Y %H:%M'))
assert parsed_time_stamp == time_stamp
assert reader.data_label == data_label
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = 'index'
with tm.ensure_clean() as path:
# should get a warning for that format.
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
def test_nan_to_missing_value(self):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ['x', 'y']
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)),
columns=columns)
original.index.name = 'index_not_written'
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
tm.assertRaises(
KeyError, lambda: written_and_read_again['index_not_written'])
def test_string_no_dates(self):
s1 = Series(['a', 'A longer string'])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({'s1': s1, 's2': s2})
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3})
original.index.name = 'index'
with tm.ensure_clean() as path:
with tm.assert_produces_warning(PossiblePrecisionLoss):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified['s1'] = Series(modified['s1'], dtype=np.int16)
modified['s2'] = Series(modified['s2'], dtype=np.int32)
modified['s3'] = Series(modified['s3'], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = 'index'
with tm.ensure_clean() as path:
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path, {0: 'tc'})
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ['_0']
tm.assert_frame_equal(written_and_read_again.set_index('index'),
modified)
def test_105(self):
# Data obtained from:
# http://go.worldbank.org/ZXY29PVJ21
dpath = os.path.join(self.dirpath, 'S4_EDUC1.dta')
df = pd.read_stata(dpath)
df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
df0 = pd.DataFrame(df0)
df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
df0['clustnum'] = df0["clustnum"].astype(np.int16)
df0['pri_schl'] = df0["pri_schl"].astype(np.int8)
df0['psch_num'] = df0["psch_num"].astype(np.int8)
df0['psch_dis'] = df0["psch_dis"].astype(np.float32)
tm.assert_frame_equal(df.head(3), df0)
def test_date_export_formats(self):
columns = ['tc', 'td', 'tw', 'tm', 'tq', 'th', 'ty']
conversions = dict(((c, c) for c in columns))
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = 'index'
expected_values = [datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1)] # Year
expected = DataFrame([expected_values], columns=columns)
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected)
def test_bool_uint(self):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame({'s0': s0, 's1': s1, 's2': s2, 's3': s3,
's4': s4, 's5': s5, 's6': s6})
original.index.name = 'index'
expected = original.copy()
expected_types = (np.int8, np.int8, np.int16, np.int16, np.int32,
np.int32, np.float64)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index('index')
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
with StataReader(self.dta16_115) as rdr:
sr_115 = rdr.variable_labels()
with StataReader(self.dta16_117) as rdr:
sr_117 = rdr.variable_labels()
keys = ('var1', 'var2', 'var3')
labels = ('label1', 'label2', 'label3')
for k, v in compat.iteritems(sr_115):
self.assertTrue(k in sr_117)
self.assertTrue(v == sr_117[k])
self.assertTrue(k in keys)
self.assertTrue(v in labels)
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len,
'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
with StataReader(path) as sr:
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
self.assertTrue(int(variable[1:]) == int(fmt[1:-1]))
self.assertTrue(int(variable[1:]) == typ)
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s['s' + str(str_len)] = Series(['a' * str_len,
'b' * str_len, 'c' * str_len])
original = DataFrame(s)
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ('b', 'h', 'l')
df = DataFrame([[0.0]], columns=['float_'])
with tm.ensure_clean() as path:
df.to_stata(path)
with StataReader(path) as rdr:
valid_range = rdr.VALID_RANGE
expected_values = ['.' + chr(97 + i) for i in range(26)]
expected_values.insert(0, '.')
for t in types:
offset = valid_range[t][1]
for i in range(0, 27):
val = StataMissingValue(offset + 1 + i)
self.assertTrue(val.string == expected_values[i])
# Test extremes for floats
val = StataMissingValue(struct.unpack('<f', b'\x00\x00\x00\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack('<f', b'\x00\xd0\x00\x7f')[0])
self.assertTrue(val.string == '.z')
# Test extremes for floats
val = StataMissingValue(struct.unpack(
'<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
self.assertTrue(val.string == '.')
val = StataMissingValue(struct.unpack(
'<d', b'\x00\x00\x00\x00\x00\x1a\xe0\x7f')[0])
self.assertTrue(val.string == '.z')
def test_missing_value_conversion(self):
columns = ['int8_', 'int16_', 'int32_', 'float32_', 'float64_']
smv = StataMissingValue(101)
keys = [key for key in iterkeys(smv.MISSING_VALUES)]
keys.sort()
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed_113 = read_stata(self.dta17_113, convert_missing=True)
parsed_115 = read_stata(self.dta17_115, convert_missing=True)
parsed_117 = read_stata(self.dta17_117, convert_missing=True)
tm.assert_frame_equal(expected, parsed_113)
tm.assert_frame_equal(expected, parsed_115)
tm.assert_frame_equal(expected, parsed_117)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(
datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([NaT] * 7)
columns = ['date_tc', 'date_td', 'date_tw', 'date_tm', 'date_tq',
'date_th', 'date_ty']
# Fixes for weekly, quarterly, half-yearly and yearly dates
expected[2][2] = datetime(9999, 12, 24)
expected[2][3] = datetime(9999, 12, 1)
expected[2][4] = datetime(9999, 10, 1)
expected[2][5] = datetime(9999, 7, 1)
expected[4][2] = datetime(2262, 4, 16)
expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
expected[5][2] = expected[5][3] = expected[
5][4] = datetime(1677, 10, 1)
expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115,
check_datetimelike_compat=True)
tm.assert_frame_equal(expected, parsed_117,
check_datetimelike_compat=True)
date_conversion = dict((c, c[-2:]) for c in columns)
# {c : c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = 'index'
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index('index'),
expected,
check_datetimelike_compat=True)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
no_conversion = read_stata(self.dta15_117,
convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(self.dta15_117,
convert_dates=True,
preserve_dtypes=False)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected['byte_'] = expected['byte_'].astype(np.int8)
expected['int_'] = expected['int_'].astype(np.int16)
expected['long_'] = expected['long_'].astype(np.int32)
expected['float_'] = expected['float_'].astype(np.float32)
expected['double_'] = expected['double_'].astype(np.float64)
expected['date_td'] = expected['date_td'].apply(datetime.strptime,
args=('%Y-%m-%d',))
columns = ['byte_', 'int_', 'long_']
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, dropped)
# See PR 10757
columns = ['int_', 'long_', 'byte_']
expected = expected[columns]
reordered = read_stata(self.dta15_117, convert_dates=True,
columns=columns)
tm.assert_frame_equal(expected, reordered)
with tm.assertRaises(ValueError):
columns = ['byte_', 'byte_']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
with tm.assertRaises(ValueError):
columns = ['byte_', 'int_', 'long_', 'not_found']
read_stata(self.dta15_117, convert_dates=True, columns=columns)
def test_categorical_writing(self):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10]
],
columns=['fully_labeled', 'fully_labeled2', 'incompletely_labeled',
'labeled_with_missings', 'float_labelled', 'unlabeled'])
expected = original.copy()
# these are all categoricals
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
expected['incompletely_labeled'] = expected[
'incompletely_labeled'].apply(str)
expected['unlabeled'] = expected['unlabeled'].apply(str)
expected = pd.concat([expected[col].astype('category')
for col in expected], axis=1)
expected.index.name = 'index'
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w: # noqa
# Silence warnings
original.to_stata(path)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index('index')
tm.assert_frame_equal(res, expected, check_categorical=False)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[['a' * 10000],
['b' * 10000],
['c' * 10000],
['d' * 10000]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
with tm.ensure_clean() as path:
tm.assertRaises(ValueError, original.to_stata, path)
original = pd.DataFrame.from_records(
[['a'],
['b'],
['c'],
['d'],
[1]],
columns=['Too_long'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
with warnings.catch_warnings(record=True) as w:
original.to_stata(path)
# should get a warning for mixed content
self.assertEqual(len(w), 1)
def test_categorical_with_stata_missing_values(self):
values = [['a' + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=['many_labels'])
original = pd.concat([original[col].astype('category')
for col in original], axis=1)
original.index.name = 'index'
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index('index')
tm.assert_frame_equal(res, original, check_categorical=False)
def test_categorical_order(self):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [(True, 'ordered', ['a', 'b', 'c', 'd', 'e'], np.arange(5)),
(True, 'reverse', ['a', 'b', 'c',
'd', 'e'], np.arange(5)[::-1]),
(True, 'noorder', ['a', 'b', 'c', 'd',
'e'], np.array([2, 1, 4, 0, 3])),
(True, 'floating', [
'a', 'b', 'c', 'd', 'e'], np.arange(0, 5)),
(True, 'float_missing', [
'a', 'd', 'e'], np.array([0, 1, 2, -1, -1])),
(False, 'nolabel', [
1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, 'int32_mixed', ['d', 2, 'e', 'b', 'a'],
np.arange(5))]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_items(cols)
# Read with and without categoricals, ensure order is identical
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
tm.assert_frame_equal(expected, parsed_115, check_categorical=False)
tm.assert_frame_equal(expected, parsed_117, check_categorical=False)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes,
parsed_115[col].cat.codes)
tm.assert_index_equal(expected[col].cat.categories,
parsed_115[col].cat.categories)
def test_categorical_sorting(self):
parsed_115 = read_stata(self.dta20_115)
parsed_117 = read_stata(self.dta20_117)
# Sort based on codes, not strings
parsed_115 = parsed_115.sort_values("srh")
parsed_117 = parsed_117.sort_values("srh")
# Don't sort index
parsed_115.index = np.arange(parsed_115.shape[0])
parsed_117.index = np.arange(parsed_117.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(codes=codes, categories=categories)
expected = pd.Series(cat, name='srh')
tm.assert_series_equal(expected, parsed_115["srh"],
check_categorical=False)
tm.assert_series_equal(expected, parsed_117["srh"],
check_categorical=False)
def test_categorical_ordering(self):
parsed_115 = read_stata(self.dta19_115)
parsed_117 = read_stata(self.dta19_117)
parsed_115_unordered = read_stata(self.dta19_115,
order_categoricals=False)
parsed_117_unordered = read_stata(self.dta19_117,
order_categoricals=False)
for col in parsed_115:
if not is_categorical_dtype(parsed_115[col]):
continue
self.assertEqual(True, parsed_115[col].cat.ordered)
self.assertEqual(True, parsed_117[col].cat.ordered)
self.assertEqual(False, parsed_115_unordered[col].cat.ordered)
self.assertEqual(False, parsed_117_unordered[col].cat.ordered)
def test_read_chunks_117(self):
files_117 = [self.dta1_117, self.dta2_117, self.dta3_117,
self.dta4_117, self.dta14_117, self.dta15_117,
self.dta16_117, self.dta17_117, self.dta18_117,
self.dta19_117, self.dta20_117]
for fname in files_117:
for chunksize in 1, 2:
for convert_categoricals in False, True:
for convert_dates in False, True:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
itr = read_stata(
fname, iterator=True,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False,
check_datetimelike_compat=True,
check_categorical=False)
pos += chunksize
itr.close()
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
with read_stata(fname, iterator=True) as itr:
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
with read_stata(fname, iterator=True) as itr:
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = itr.get_chunk()
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
# GH12153
from_chunks = pd.concat(read_stata(fname, chunksize=4))
tm.assert_frame_equal(parsed, from_chunks)
def test_read_chunks_115(self):
files_115 = [self.dta2_115, self.dta3_115, self.dta4_115,
self.dta14_115, self.dta15_115, self.dta16_115,
self.dta17_115, self.dta18_115, self.dta19_115,
self.dta20_115]
for fname in files_115:
for chunksize in 1, 2:
for convert_categoricals in False, True:
for convert_dates in False, True:
# Read the whole file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates)
# Compare to what we get when reading by chunk
itr = read_stata(
fname, iterator=True,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(
from_frame, chunk, check_dtype=False,
check_datetimelike_compat=True,
check_categorical=False)
pos += chunksize
itr.close()
def test_read_chunks_columns(self):
fname = self.dta3_117
columns = ['quarter', 'cpi', 'm1']
chunksize = 2
parsed = read_stata(fname, columns=columns)
with read_stata(fname, iterator=True) as itr:
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos:pos + chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
def test_write_variable_labels(self):
# GH 13631, add support for writing variable labels
original = pd.DataFrame({'a': [1, 2, 3, 4],
'b': [1.0, 3.0, 27.0, 81.0],
'c': ['Atlanta', 'Birmingham',
'Cincinnati', 'Detroit']})
original.index.name = 'index'
variable_labels = {'a': 'City Rank', 'b': 'City Exponent', 'c': 'City'}
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
expected_labels = {'index': '',
'a': 'City Rank',
'b': 'City Exponent',
'c': 'City'}
tm.assert_equal(read_labels, expected_labels)
variable_labels['index'] = 'The Index'
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
tm.assert_equal(read_labels, variable_labels)
def test_write_variable_label_errors(self):
original = pd.DataFrame({'a': [1, 2, 3, 4],
'b': [1.0, 3.0, 27.0, 81.0],
'c': ['Atlanta', 'Birmingham',
'Cincinnati', 'Detroit']})
values = [u'\u03A1', u'\u0391',
u'\u039D', u'\u0394',
u'\u0391', u'\u03A3']
variable_labels_utf8 = {'a': 'City Rank',
'b': 'City Exponent',
'c': u''.join(values)}
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels_utf8)
variable_labels_long = {'a': 'City Rank',
'b': 'City Exponent',
'c': 'A very, very, very long variable label '
'that is too long for Stata which means '
'that it has more than 80 characters'}
with tm.assertRaises(ValueError):
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels_long)
def test_default_date_conversion(self):
# GH 12259
dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
reread = read_stata(path, convert_dates=True)
tm.assert_frame_equal(original, reread)
original.to_stata(path,
write_index=False,
convert_dates={'dates': 'tc'})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
def test_unsupported_type(self):
original = pd.DataFrame({'a': [1 + 2j, 2 + 4j]})
with tm.assertRaises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_unsupported_datetype(self):
dates = [dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000)]
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with tm.assertRaises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path, convert_dates={'dates': 'tC'})
dates = pd.date_range('1-1-1990', periods=3, tz='Asia/Hong_Kong')
original = pd.DataFrame({'nums': [1.0, 2.0, 3.0],
'strs': ['apple', 'banana', 'cherry'],
'dates': dates})
with tm.assertRaises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_repeated_column_labels(self):
# GH 13923
with tm.assertRaises(ValueError) as cm:
read_stata(self.dta23, convert_categoricals=True)
tm.assertTrue('wolof' in cm.exception)
def test_stata_111(self):
# 111 is an old version but still used by current versions of
# SAS when exporting to Stata format. We do not know of any
# online documentation for this version.
df = read_stata(self.dta24_111)
original = pd.DataFrame({'y': [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
'x': [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
'w': [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
'z': ['a', 'b', 'c', 'd', 'e', '', 'g', 'h',
'i', 'j']})
original = original[['y', 'x', 'w', 'z']]
tm.assert_frame_equal(original, df)
def test_out_of_range_double(self):
# GH 14618
df = DataFrame({'ColumnOk': [0.0,
np.finfo(np.double).eps,
4.49423283715579e+307],
'ColumnTooBig': [0.0,
np.finfo(np.double).eps,
np.finfo(np.double).max]})
with tm.assertRaises(ValueError) as cm:
with tm.ensure_clean() as path:
df.to_stata(path)
tm.assertTrue('ColumnTooBig' in cm.exception)
df.loc[2, 'ColumnTooBig'] = np.inf
with tm.assertRaises(ValueError) as cm:
with tm.ensure_clean() as path:
df.to_stata(path)
tm.assertTrue('ColumnTooBig' in cm.exception)
tm.assertTrue('infinity' in cm.exception)
def test_out_of_range_float(self):
original = DataFrame({'ColumnOk': [0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max / 10.0],
'ColumnTooBig': [0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max]})
original.index.name = 'index'
for col in original:
original[col] = original[col].astype(np.float32)
with tm.ensure_clean() as path:
original.to_stata(path)
reread = read_stata(path)
original['ColumnTooBig'] = original['ColumnTooBig'].astype(
np.float64)
tm.assert_frame_equal(original,
reread.set_index('index'))
original.loc[2, 'ColumnTooBig'] = np.inf
with tm.assertRaises(ValueError) as cm:
with tm.ensure_clean() as path:
original.to_stata(path)
tm.assertTrue('ColumnTooBig' in cm.exception)
tm.assertTrue('infinity' in cm.exception)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
bdrillard/spark | python/pyspark/testing/sqlutils.py | 15 | 7799 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
from pyspark.util import _exception_message
pandas_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = _exception_message(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = _exception_message(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = _exception_message(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate a UDT implemented in Python only
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance using it has a 'spark' attribute that holds a Spark session.
It is usually used together with the 'ReusedSQLTestCase' class, but it can also be used
on its own as long as you are sure the implementing class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
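# A minimal usage sketch for ``sql_conf`` (illustrative only; the test body
# below is hypothetical):
#
#     with self.sql_conf({"spark.sql.shuffle.partitions": "4"}):
#         ...  # code here sees the temporary value; it is restored on exit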
@contextmanager
def database(self, *databases):
"""
A convenient context manager to test with some specific databases. This drops the given
databases if they exist and sets the current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
functions if they exist.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
sumspr/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
xiaoxiamii/scikit-learn | sklearn/manifold/t_sne.py | 48 | 20644 | # Author: Alexander Fabisch -- <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the standard t-SNE implementation. There are faster modifications of
# the algorithm:
# * Barnes-Hut-SNE: reduces the complexity of the gradient computation from
# N^2 to N log N (http://arxiv.org/abs/1301.3342)
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
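# Background sketch (not part of the original module): SciPy's ``pdist`` and
# ``squareform`` convert between the condensed and the square form of a
# distance matrix, which is the layout the docstrings above refer to, e.g.
#
#     from scipy.spatial.distance import pdist, squareform
#     X_demo = np.arange(8.0).reshape(4, 2)           # hypothetical data
#     d_condensed = pdist(X_demo, "sqeuclidean")      # shape (4 * 3 / 2,) = (6,)
#     d_square = squareform(d_condensed)              # shape (4, 4)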
def _kl_divergence(params, P, alpha, n_samples, n_components):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
alpha : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= alpha
n **= (alpha + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (alpha + 1.0) / alpha
grad *= c
return kl_divergence, grad
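# A minimal evaluation sketch for ``_kl_divergence`` (illustrative only; the
# ``*_demo`` names are hypothetical, and ``P_demo`` is a synthetic stand-in for
# a real condensed joint-probability matrix):
#
#     rng = np.random.RandomState(0)
#     n_samples_demo, n_components_demo = 10, 2
#     raw = rng.rand(n_samples_demo * (n_samples_demo - 1) // 2)
#     P_demo = raw / raw.sum()                  # condensed, sums to one
#     params_demo = 1e-4 * rng.randn(n_samples_demo * n_components_demo)
#     kl, grad = _kl_divergence(params_demo, P_demo, 1.0,
#                               n_samples_demo, n_components_demo)
#     # ``grad`` can be checked against finite differences of ``kl``.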
def _gradient_descent(objective, p0, it, n_iter, n_iter_without_progress=30,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
grad_norm = linalg.norm(grad)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if min_grad_norm >= grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if min_error_diff >= error_diff:
if verbose >= 2:
print("[t-SNE] Iteration %d: error difference %f. Finished."
% (i + 1, error_diff))
break
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
        np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if verbose >= 2 and (i + 1) % 10 == 0:
print("[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
% (i + 1, error, grad_norm))
return p, error, i
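# Hedged usage sketch (added for illustration; the helper name and the quadratic
# toy objective are assumptions): `_gradient_descent` only needs a callable that
# returns (cost, gradient) for a flat parameter vector, so a simple quadratic
# demonstrates the calling convention used by the t-SNE stages further below.
def _demo_gradient_descent():
    def quadratic(p):
        return np.sum(p ** 2), 2.0 * p  # cost and its gradient
    p0 = np.array([3.0, -2.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                     momentum=0.5, learning_rate=0.1)
    return p, error, it                 # p approaches [0, 0]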
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
        T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
            \sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
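# Hedged usage sketch (added for illustration; the helper name and the random
# data are assumptions): trustworthiness compares neighborhoods in the original
# space against those in the embedding; values close to 1.0 mean the local
# structure is well preserved.
def _demo_trustworthiness():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)                # high-dimensional samples
    X_embedded = X[:, :2]               # crude "embedding": keep two axes
    return trustworthiness(X, X_embedded, n_neighbors=5)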
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
        usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 887.28..., 238.61...],
[ -714.79..., 3243.34...],
[ 957.30..., -2505.78...],
    [-1130.28..., -974.78...]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None):
if init not in ["pca", "random"]:
raise ValueError("'init' must be either 'pca' or 'random'")
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
# Degrees of freedom of the Student's t-distribution. The suggestion
# alpha = n_components - 1 comes from "Learning a Parametric Embedding
# by Preserving Local Structure" Laurens van der Maaten, 2009.
alpha = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
self.training_data_ = X
P = _joint_probabilities(distances, self.perplexity, self.verbose)
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
self.embedding_ = self._tsne(P, alpha, n_samples, random_state,
X_embedded=X_embedded)
return self
def _tsne(self, P, alpha, n_samples, random_state, X_embedded=None):
"""Runs t-SNE."""
        # t-SNE minimizes the Kullback-Leibler divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=0, n_iter=50, momentum=0.5,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=100, momentum=0.8,
min_grad_norm=0.0, min_error_diff=0.0,
learning_rate=self.learning_rate, verbose=self.verbose,
args=[P, alpha, n_samples, self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Final optimization
P /= self.early_exaggeration
params, error, it = _gradient_descent(
_kl_divergence, params, it=it + 1, n_iter=self.n_iter,
min_grad_norm=self.min_grad_norm,
n_iter_without_progress=self.n_iter_without_progress,
momentum=0.8, learning_rate=self.learning_rate,
verbose=self.verbose, args=[P, alpha, n_samples,
self.n_components])
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Transform X to the embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
draperjames/qtpandas | qtpandas/utils.py | 1 | 8255 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# For Python 2 compatibility
# from __future__ import print_function
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
from random import randint
from pandas import to_datetime
import pandas as pd
import numpy as np
import os
def fillNoneValues(column):
"""Fill all NaN/NaT values of a column with an empty string
Args:
column (pandas.Series): A Series object with all rows.
Returns:
column: Series with filled NaN values.
"""
if column.dtype == object:
column.fillna('', inplace=True)
return column
def convertTimestamps(column):
"""Convert a dtype of a given column to a datetime.
This method tries to do this by brute force.
Args:
column (pandas.Series): A Series object with all rows.
Returns:
        column: Converted to datetime if no errors occurred, else the
original column will be returned.
"""
tempColumn = column
try:
# Try to convert the first row and a random row instead of the complete
# column, might be faster
# tempValue = np.datetime64(column[0])
tempValue = np.datetime64(column[randint(0, len(column.index) - 1)])
tempColumn = column.apply(to_datetime)
except Exception:
pass
return tempColumn
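# Hedged usage sketch (added for illustration; the helper name and the sample
# Series are assumptions): object columns get their missing values blanked by
# fillNoneValues, while convertTimestamps coerces date-like strings to pandas
# Timestamps when the brute-force conversion succeeds.
def _demo_column_cleanup():
    filled = fillNoneValues(pd.Series(['2017-01-01', '2017-06-15', None]))
    converted = convertTimestamps(pd.Series(['2017-01-01', '2017-06-15']))
    return filled.tolist(), converted.tolist()
    # (['2017-01-01', '2017-06-15', ''], [Timestamp('2017-01-01 00:00:00'), ...])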
def superReadCSV(filepath, first_codec='UTF_8', usecols=None,
low_memory=False, dtype=None, parse_dates=True,
sep=',', chunksize=None, verbose=False, **kwargs):
"""
    A wrapper around pandas.read_csv modified to accept a dataframe or a
    filepath. A dataframe is returned untouched; a filepath is read and a
    dataframe is returned based on the arguments.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
assert isinstance(first_codec, str), "first_codec must be a string"
codecs = ['UTF_8', 'ISO-8859-1', 'ASCII', 'UTF_16', 'UTF_32']
try:
codecs.remove(first_codec)
except ValueError as not_in_list:
pass
codecs.insert(0, first_codec)
errors = []
for c in codecs:
try:
return pd.read_csv(filepath,
usecols=usecols,
low_memory=low_memory,
encoding=c,
dtype=dtype,
parse_dates=parse_dates,
sep=sep,
chunksize=chunksize,
**kwargs)
# Need to catch `UnicodeError` here, not just `UnicodeDecodeError`,
# because pandas 0.23.1 raises it when decoding with UTF_16 and the
# file is not in that format:
except (UnicodeError, UnboundLocalError) as e:
errors.append(e)
except Exception as e:
errors.append(e)
if 'tokenizing' in str(e):
pass
else:
raise
if verbose:
[print(e) for e in errors]
    # Note: UnicodeDecodeError requires five positional arguments, so raise the
    # more general UnicodeError with a plain message instead.
    raise UnicodeError("Tried {} codecs and failed on all: \n CODECS: {} \n FILENAME: {}".format(
        len(codecs), codecs, os.path.basename(filepath)))
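# Hedged usage sketch (added for illustration; the helper name and the temporary
# file are assumptions): a plain UTF-8 comma-separated file is read on the first
# codec attempt and comes back as a DataFrame.
def _demo_superReadCSV():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as fh:
        fh.write("a,b\n1,2\n3,4\n")
        path = fh.name
    return superReadCSV(path).shape     # (2, 2)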
def _count(item, string):
if len(item) == 1:
return len(''.join(x for x in string if x == item))
return len(str(string.split(item)))
def identify_sep(filepath):
"""
Identifies the separator of data in a filepath.
It reads the first line of the file and counts supported separators.
Currently supported separators: ['|', ';', ',','\t',':']
"""
ext = os.path.splitext(filepath)[1].lower()
allowed_exts = ['.csv', '.txt', '.tsv']
    assert ext in allowed_exts, "Unexpected file extension {}. \
        Supported extensions {}\n filename: {}".format(
        ext, allowed_exts, os.path.basename(filepath))
maybe_seps = ['|',
';',
',',
'\t',
':']
    with open(filepath, 'r') as fp:
        header = next(fp)
count_seps_header = {sep: _count(sep, header) for sep in maybe_seps}
count_seps_header = {sep: count for sep,
count in count_seps_header.items() if count > 0}
if count_seps_header:
        return max(count_seps_header, key=count_seps_header.get)
else:
raise Exception("Couldn't identify the sep from the header... here's the information:\n HEADER: {}\n SEPS SEARCHED: {}".format(header, maybe_seps))
def superReadText(filepath, **kwargs):
"""
A wrapper to superReadCSV which wraps pandas.read_csv().
The benefit of using this function is that it automatically identifies the
column separator.
    .tsv files are assumed to be tab-separated.
    .csv files are assumed to be comma-separated.
    .txt files (or any other type) get the first line of the file opened
    and tested for the various separators defined in the identify_sep
    function.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
sep = kwargs.get('sep', None)
ext = os.path.splitext(filepath)[1].lower()
if sep is None:
if ext == '.tsv':
kwargs['sep'] = '\t'
elif ext == '.csv':
kwargs['sep'] = ','
else:
found_sep = identify_sep(filepath)
print(found_sep)
kwargs['sep'] = found_sep
return superReadCSV(filepath, **kwargs)
def superReadFile(filepath, **kwargs):
"""
Uses pandas.read_excel (on excel files) and returns a dataframe of the
    first sheet (unless a sheet is specified in kwargs).
    Uses superReadText (on .txt, .tsv, or .csv files) and returns a dataframe of
the data. One function to read almost all types of data files.
"""
if isinstance(filepath, pd.DataFrame):
return filepath
ext = os.path.splitext(filepath)[1].lower()
if ext in ['.xlsx', '.xls']:
df = pd.read_excel(filepath, **kwargs)
elif ext in ['.pkl', '.p', '.pickle', '.pk']:
df = pd.read_pickle(filepath)
else:
# Assume it's a text-like file and try to read it.
try:
df = superReadText(filepath, **kwargs)
except Exception as e:
# TODO: Make this trace back better? Custom Exception? Raise original?
raise Exception("Error reading file: {}".format(e))
return df
def dedupe_cols(frame):
"""
Need to dedupe columns that have the same name.
"""
cols = list(frame.columns)
for i, item in enumerate(frame.columns):
if item in frame.columns[:i]:
cols[i] = "toDROP"
frame.columns = cols
    return frame.drop("toDROP", axis=1, errors='ignore')
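# Hedged usage sketch (added for illustration; the helper name and the frame are
# assumptions): duplicated column names are collapsed so only the first
# occurrence survives.
def _demo_dedupe_cols():
    frame = pd.DataFrame([[1, 2, 3]], columns=['a', 'b', 'a'])
    return list(dedupe_cols(frame).columns)     # ['a', 'b']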
def rename_dupe_cols(cols):
"""
    Takes a list of strings and appends 2, 3, 4, etc. to duplicates. It never
    appends a 0 or 1. The appended numbers are not always in order, but if you
    wrap this in a DataFrame.to_sql call you are guaranteed not to hit duplicate
    column name errors when importing data to SQL; you will just have to check
    which fields were renamed.
"""
counts = {}
positions = {pos: fld for pos, fld in enumerate(cols)}
for c in cols:
if c in counts.keys():
counts[c] += 1
else:
counts[c] = 1
fixed_cols = {}
for pos, col in positions.items():
if counts[col] > 1:
fix_cols = {pos: fld for pos, fld in positions.items() if fld == col}
keys = [p for p in fix_cols.keys()]
min_pos = min(keys)
cnt = 1
for p, c in fix_cols.items():
if not p == min_pos:
cnt += 1
c = c + str(cnt)
fixed_cols.update({p: c})
positions.update(fixed_cols)
cols = [x for x in positions.values()]
return cols
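# Hedged usage sketch (added for illustration; the helper name is an
# assumption): duplicates get a numeric suffix starting at 2, although the
# suffix order is not guaranteed on older Python versions.
def _demo_rename_dupe_cols():
    return rename_dupe_cols(['id', 'name', 'id', 'id'])
    # e.g. ['id', 'name', 'id2', 'id3']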
| mit |
JohnGriffiths/nipype | tools/make_examples.py | 16 | 2859 | #!/usr/bin/env python
"""Run the py->rst conversion and run all examples.
This also creates the index.rst file appropriately, makes figures, etc.
"""
#-----------------------------------------------------------------------------
# Library imports
#-----------------------------------------------------------------------------
# Stdlib imports
import os
import sys
from glob import glob
# Third-party imports
# We must configure the mpl backend before making any further mpl imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib._pylab_helpers import Gcf
# Local tools
from toollib import *
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
examples_header = """
.. _examples:
Examples
========
.. note_about_examples
"""
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
# These global variables let show() be called by the scripts in the usual
# manner, but when generating examples, we override it to write the figures to
# files with a known name (derived from the script name) plus a counter
figure_basename = None
# We must change the show command to save instead
def show():
allfm = Gcf.get_all_fig_managers()
for fcount, fm in enumerate(allfm):
fm.canvas.figure.savefig('%s_%02i.png' %
(figure_basename, fcount+1))
_mpl_show = plt.show
plt.show = show
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
# Work in examples directory
cd('users/examples')
if not os.getcwd().endswith('users/examples'):
raise OSError('This must be run from doc/examples directory')
# Run the conversion from .py to rst file
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples')
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples/frontiers_paper')
# Make the index.rst file
"""
index = open('index.rst', 'w')
index.write(examples_header)
for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
#Don't add the index in there to avoid sphinx errors and don't add the
#note_about examples again (because it was added at the top):
if name not in(['index','note_about_examples']):
index.write(' %s\n' % name)
index.close()
"""
# Execute each python script in the directory.
if '--no-exec' in sys.argv:
pass
else:
if not os.path.isdir('fig'):
os.mkdir('fig')
for script in glob('*.py'):
figure_basename = pjoin('fig', os.path.splitext(script)[0])
execfile(script)
plt.close('all')
| bsd-3-clause |
bthirion/nipy | nipy/modalities/fmri/tests/test_dmtx.py | 1 | 15829 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the design_matrix utilities.
Note that these tests only check whether the produced data has the correct
dimensions, not whether the values are exact.
"""
from __future__ import with_statement
import numpy as np
from os.path import join, dirname
from ..experimental_paradigm import (EventRelatedParadigm, BlockParadigm)
from ..design_matrix import (dmtx_light, _convolve_regressors, dmtx_from_csv,
make_dmtx)
from nibabel.tmpdirs import InTemporaryDirectory
from nose.tools import assert_true, assert_equal
from numpy.testing import assert_almost_equal, dec, assert_array_equal
try:
import matplotlib.pyplot
except ImportError:
have_mpl = False
else:
have_mpl = True
DMTX = np.load(join(dirname(__file__), 'spm_dmtx.npz'))
def basic_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
paradigm = EventRelatedParadigm(conditions, onsets)
return paradigm
def modulated_block_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
duration = 5 + 5 * np.random.rand(len(onsets))
values = 1 + np.random.rand(len(onsets))
paradigm = BlockParadigm(conditions, onsets, duration, values)
return paradigm
def modulated_event_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
values = 1 + np.random.rand(len(onsets))
paradigm = EventRelatedParadigm(conditions, onsets, values)
return paradigm
def block_paradigm():
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
duration = 5 * np.ones(9)
    paradigm = BlockParadigm(conditions, onsets, duration)
return paradigm
@dec.skipif(not have_mpl)
def test_show_dmtx():
# test that the show code indeed (formally) runs
frametimes = np.linspace(0, 127 * 1.,128)
DM = make_dmtx(frametimes, drift_model='polynomial', drift_order=3)
ax = DM.show()
assert (ax is not None)
def test_dmtx0():
# Test design matrix creation when no paradigm is provided
tr = 1.0
frametimes = np.linspace(0, 127 * tr,128)
X, names= dmtx_light(frametimes, drift_model='polynomial',
drift_order=3)
assert_equal(len(names), 4)
def test_dmtx0b():
# Test design matrix creation when no paradigm is provided
tr = 1.0
frametimes = np.linspace(0, 127 * tr,128)
X, names= dmtx_light(frametimes, drift_model='polynomial',
drift_order=3)
assert_almost_equal(X[:, 0], np.linspace(- 0.5, .5, 128))
def test_dmtx0c():
# test design matrix creation when regressors are provided manually
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
ax = np.random.randn(128, 4)
X, names= dmtx_light(frametimes, drift_model='polynomial',
drift_order=3, add_regs=ax)
assert_almost_equal(X[:, 0], ax[:, 0])
def test_dmtx0d():
# test design matrix creation when regressors are provided manually
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
ax = np.random.randn(128, 4)
X, names= dmtx_light(frametimes, drift_model='polynomial',
drift_order=3, add_regs=ax)
assert_equal(len(names), 8)
assert_equal(X.shape[1], 8)
def test_dmtx1():
# basic test based on basic_paradigm and canonical hrf
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(len(names), 7)
def test_convolve_regressors():
# tests for convolve_regressors helper function
conditions = ['c0', 'c1']
onsets = [20, 40]
paradigm = EventRelatedParadigm(conditions, onsets)
# names not passed -> default names
frametimes = np.arange(100)
f, names = _convolve_regressors(paradigm, 'canonical', frametimes)
assert_equal(names, ['c0', 'c1'])
def test_dmtx1b():
# idem test_dmtx1, but different test
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(X.shape, (128, 7))
def test_dmtx1c():
# idem test_dmtx1, but different test
tr = 1.0
frametimes = np.linspace(0, 127 *tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X,names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_true((X[:, - 1] == 1).all())
def test_dmtx1d():
# idem test_dmtx1, but different test
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X,names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_true((np.isnan(X) == 0).all())
def test_dmtx2():
# idem test_dmtx1 with a different drift term
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='cosine', hfcut=63)
assert_equal(len(names), 8)
def test_dmtx3():
# idem test_dmtx1 with a different drift term
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
X,names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='blank')
assert_equal(len(names), 4)
def test_dmtx4():
# idem test_dmtx1 with a different hrf model
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical With Derivative'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(len(names), 10)
def test_dmtx5():
# idem test_dmtx1 with a block paradigm
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = block_paradigm()
hrf_model = 'Canonical'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(len(names), 7)
def test_dmtx6():
# idem test_dmtx1 with a block paradigm and the hrf derivative
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = block_paradigm()
hrf_model = 'Canonical With Derivative'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(len(names), 10)
def test_dmtx7():
# idem test_dmtx1, but odd paradigm
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
conditions = [0, 0, 0, 1, 1, 1, 3, 3, 3]
# no condition 'c2'
onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60]
paradigm = EventRelatedParadigm(conditions, onsets)
hrf_model = 'Canonical'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(len(names), 7)
def test_dmtx8():
# basic test based on basic_paradigm and FIR
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names= dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
assert_equal(len(names), 7)
def test_dmtx9():
# basic test based on basic_paradigm and FIR
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
assert_equal(len(names), 16)
def test_dmtx10():
    # Check that the first column of the FIR design matrix is OK
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all((X[onset + 1, 0] == 1)))
def test_dmtx11():
# check that the second column of the FIR design matrix is OK indeed
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all(X[onset + 3, 2] == 1))
def test_dmtx12():
# check that the 11th column of a FIR design matrix is indeed OK
tr = 1.0
frametimes = np.linspace(0, 127 * tr,128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c2'].astype(np.int)
assert_true(np.all(X[onset + 4, 11] == 1))
def test_dmtx13():
# Check that the fir_duration is well taken into account
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all(X[onset + 1, 0] == 1))
def test_dmtx14():
    # Check that the first column of the FIR design matrix is OK after a 1/2
# time shift
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128) + tr / 2
paradigm = basic_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
onset = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int)
assert_true(np.all(X[onset + 1, 0] > .9))
def test_dmtx15():
# basic test based on basic_paradigm, plus user supplied regressors
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
ax = np.random.randn(128, 4)
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3, add_regs=ax)
assert_equal(len(names), 11)
assert_equal(X.shape[1], 11)
def test_dmtx16():
# Check that additional regressors are put at the right place
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = basic_paradigm()
hrf_model = 'Canonical'
ax = np.random.randn(128, 4)
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3, add_regs=ax)
assert_almost_equal(X[:, 3: 7], ax)
def test_dmtx17():
# Test the effect of scaling on the events
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_event_paradigm()
hrf_model = 'Canonical'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) + 1
assert_true((X[ct, 0] > 0).all())
def test_dmtx18():
# Test the effect of scaling on the blocks
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_block_paradigm()
hrf_model = 'Canonical'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3)
ct = paradigm.onset[paradigm.con_id == 'c0'].astype(np.int) + 3
assert_true((X[ct, 0] > 0).all())
def test_dmtx19():
# Test the effect of scaling on a FIR model
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_event_paradigm()
hrf_model = 'FIR'
X, names = dmtx_light(frametimes, paradigm, hrf_model=hrf_model,
drift_model='polynomial', drift_order=3,
fir_delays=range(1, 5))
idx = paradigm.onset[paradigm.con_id == 0].astype(np.int)
assert_array_equal(X[idx + 1, 0], X[idx + 2, 1])
def test_dmtx20():
# Test for commit 10662f7
frametimes = np.arange(0, 127) # integers
paradigm = modulated_event_paradigm()
X, names = dmtx_light(frametimes, paradigm, hrf_model='canonical',
drift_model='cosine')
# check that the drifts are not constant
assert_true(np.all(np.diff(X[:, -2]) != 0))
def test_fir_block():
    # tests FIR models on block designs
bp = block_paradigm()
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
X, names = dmtx_light(frametimes, bp, hrf_model='fir', drift_model='blank',
fir_delays=range(0, 4))
idx = bp.onset[bp.con_id == 1].astype(np.int)
assert_equal(X.shape, (128, 13))
assert_true((X[idx, 4] == 1).all())
assert_true((X[idx + 1, 5] == 1).all())
assert_true((X[idx + 2, 6] == 1).all())
assert_true((X[idx + 3, 7] == 1).all())
def test_csv_io():
# test the csv io on design matrices
tr = 1.0
frametimes = np.linspace(0, 127 * tr, 128)
paradigm = modulated_event_paradigm()
DM = make_dmtx(frametimes, paradigm, hrf_model='Canonical',
drift_model='polynomial', drift_order=3)
path = 'dmtx.csv'
with InTemporaryDirectory():
DM.write_csv(path)
DM2 = dmtx_from_csv(path)
assert_almost_equal(DM.matrix, DM2.matrix)
assert_equal(DM.names, DM2.names)
def test_spm_1():
# Check that the nipy design matrix is close enough to the SPM one
# (it cannot be identical, because the hrf shape is different)
frametimes = np.linspace(0, 99, 100)
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
paradigm = EventRelatedParadigm(conditions, onsets)
X1 = make_dmtx(frametimes, paradigm, drift_model='blank')
spm_dmtx = DMTX['arr_0']
assert_true(((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum()
< .1)
def test_spm_2():
# Check that the nipy design matrix is close enough to the SPM one
# (it cannot be identical, because the hrf shape is different)
frametimes = np.linspace(0, 99, 100)
conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
duration = 10 * np.ones(9)
paradigm = BlockParadigm(conditions, onsets, duration)
X1 = make_dmtx(frametimes, paradigm, drift_model='blank')
spm_dmtx = DMTX['arr_1']
assert_true(((spm_dmtx - X1.matrix) ** 2).sum() / (spm_dmtx ** 2).sum()
< .1)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
tschaume/pymatgen | pymatgen/phonon/plotter.py | 2 | 23775 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
from collections import OrderedDict, namedtuple
import numpy as np
import scipy.constants as const
from monty.json import jsanitize
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.plotting import pretty_plot, add_fig_kwargs, get_ax_fig_plt
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
"""
This module implements plotter for DOS and band structure.
"""
logger = logging.getLogger(__name__)
FreqUnits = namedtuple("FreqUnits", ["factor", "label"])
def freq_units(units):
"""
    Returns the conversion factor from THz to the required units and the label in the form of a namedtuple
Accepted values: thz, ev, mev, ha, cm-1, cm^-1
"""
d = {"thz": FreqUnits(1, "THz"),
"ev": FreqUnits(const.value("hertz-electron volt relationship") * const.tera, "eV"),
"mev": FreqUnits(const.value("hertz-electron volt relationship") * const.tera / const.milli, "meV"),
"ha": FreqUnits(const.value("hertz-hartree relationship") * const.tera, "Ha"),
"cm-1": FreqUnits(const.value("hertz-inverse meter relationship") * const.tera * const.centi, "cm^{-1}"),
'cm^-1': FreqUnits(const.value("hertz-inverse meter relationship") * const.tera * const.centi, "cm^{-1}")
}
try:
return d[units.lower().strip()]
except KeyError:
raise KeyError('Value for units `{}` unknown\nPossible values are:\n {}'.format(units, list(d.keys())))
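# Hedged usage sketch (added for illustration; the helper name is an
# assumption): converting 1 THz to meV with the factor returned by freq_units;
# the conversion factor is roughly 4.136 meV per THz.
def _demo_freq_units():
    u = freq_units("mev")
    return 1.0 * u.factor, u.label      # (~4.136, 'meV')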
class PhononDosPlotter:
"""
Class for plotting phonon DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = PhononDosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompletePhononDos.get_element_dos().
Args:
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, stack=False, sigma=None):
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
PhononDos object
"""
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'frequencies':..,
'densities': ...}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None, units="thz"):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies'] * u.factor
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel(r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label))
plt.ylabel(r'$\mathrm{Density\ of\ states}$')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1
"""
plt = self.get_plot(xlim, ylim, units=units)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None, units="thz"):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(xlim, ylim, units=units)
plt.show()
class PhononBSPlotter:
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, PhononBandStructureSymmLine):
raise ValueError(
"PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
"A PhononBandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize only plot the uniq values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
                        l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self):
"""
Get the data nicely formatted for a plot
Returns:
A dict of the following format:
ticks: A dict with the 'distances' at which there is a qpoint (the
x axis) and the labels (None if no label)
frequencies: A list (one element for each branch) of frequencies for
each qpoint: [branch][qpoint][mode]. The data is
stored by branch to facilitate the plotting
lattice: The reciprocal lattice.
"""
distance = []
frequency = []
ticks = self.get_ticks()
for b in self._bs.branches:
frequency.append([])
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
for i in range(self._nb_bands):
frequency[-1].append(
[self._bs.bands[i][j]
for j in range(b['start_index'], b['end_index'] + 1)])
return {'ticks': ticks, 'distances': distance, 'frequency': frequency,
'lattice': self._bs.lattice_rec.as_dict()}
def get_plot(self, ylim=None, units="thz"):
"""
Get a matplotlib object for the bandstructure plot.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
u = freq_units(units)
plt = pretty_plot(12, 8)
band_linewidth = 1
data = self.bs_plot_data()
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['frequency'][d][i][j] * u.factor
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
self._maketicks(plt)
# plot y=0 line
plt.axhline(0, linewidth=1, color='k')
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{{Frequencies\ ({})}}$'.format(u.label)
plt.ylabel(ylabel, fontsize=30)
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is not None:
plt.ylim(ylim)
plt.tight_layout()
return plt
def show(self, ylim=None, units="thz"):
"""
Show the plot using matplotlib.
Args:
ylim: Specify the y-axis (frequency) limits; by default None let
the code choose.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim, units=units)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None, units="thz"):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
"""
plt = self.get_plot(ylim=ylim, units=units)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
A dict with 'distance': a list of distance at which ticks should
be set and 'label': a list of label for each of those ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.qpoints[0].label
previous_branch = self._bs.branches[0]['name']
for i, c in enumerate(self._bs.qpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter):
"""
        Plot two band structures for comparison. One is in red, the other in blue.
        The two band structures need to be defined on the same symmetry lines,
        and the distances between symmetry lines are taken from the band
        structure used to build this PhononBSPlotter.
        Args:
            other_plotter: another PhononBSPlotter object defined along the
                same symmetry lines
Returns:
a matplotlib object with both band structures
"""
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
if len(data_orig['distances']) != len(data['distances']):
raise ValueError('The two objects are not compatible.')
plt = self.get_plot()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[i] for e in data['frequency']][d],
'r-', linewidth=band_linewidth)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for q in self._bs.qpoints:
if q.label:
labels[q.label] = q.frac_coords
lines = []
for b in self._bs.branches:
lines.append([self._bs.qpoints[b['start_index']].frac_coords,
self._bs.qpoints[b['end_index']].frac_coords])
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
class ThermoPlotter:
"""
Plotter for thermodynamic properties obtained from phonon DOS.
    If the structure corresponding to the DOS is provided, it will be used to
    extract the formula unit, and the plots will be given in units of mol
    instead of mol-cell.
"""
def __init__(self, dos, structure=None):
"""
Args:
dos: A PhononDos object.
structure: A Structure object corresponding to the structure used for the calculation.
"""
self.dos = dos
self.structure = structure
def _plot_thermo(self, func, temperatures, factor=1, ax=None, ylabel=None, label=None, ylim=None, **kwargs):
"""
Plots a thermodynamic property for a generic function from a PhononDos instance.
Args:
func: the thermodynamic function to be used to calculate the property
temperatures: a list of temperatures
factor: a multiplicative factor applied to the thermodynamic property calculated. Used to change
the units.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
ylabel: label for the y axis
label: label of the plot
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
ax, fig, plt = get_ax_fig_plt(ax)
values = []
for t in temperatures:
values.append(func(t, structure=self.structure) * factor)
ax.plot(temperatures, values, label=label, **kwargs)
if ylim:
ax.set_ylim(ylim)
ax.set_xlim((np.min(temperatures), np.max(temperatures)))
ylim = plt.ylim()
if ylim[0] < 0 < ylim[1]:
plt.plot(plt.xlim(), [0, 0], 'k-', linewidth=1)
ax.set_xlabel(r"$T$ (K)")
if ylabel:
ax.set_ylabel(ylabel)
return fig
@add_fig_kwargs
def plot_cv(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the constant volume specific heat C_v in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$C_v$ (J/K/mol)"
else:
ylabel = r"$C_v$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig
@add_fig_kwargs
def plot_entropy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
        Plots the vibrational entropy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$S$ (J/K/mol)"
else:
ylabel = r"$S$ (J/K/mol-c)"
fig = self._plot_thermo(self.dos.entropy, temperatures, ylabel=ylabel, ylim=ylim, **kwargs)
return fig
@add_fig_kwargs
def plot_internal_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots the vibrational internal energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta E$ (kJ/mol)"
else:
ylabel = r"$\Delta E$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.internal_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig
@add_fig_kwargs
def plot_helmholtz_free_energy(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
        Plots the vibrational contribution to the Helmholtz free energy in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
if self.structure:
ylabel = r"$\Delta F$ (kJ/mol)"
else:
ylabel = r"$\Delta F$ (kJ/mol-c)"
fig = self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylabel=ylabel, ylim=ylim,
factor=1e-3, **kwargs)
return fig
@add_fig_kwargs
def plot_thermodynamic_properties(self, tmin, tmax, ntemp, ylim=None, **kwargs):
"""
Plots all the thermodynamic properties in a temperature range.
Args:
tmin: minimum temperature
tmax: maximum temperature
ntemp: number of steps
ylim: tuple specifying the y-axis limits.
kwargs: kwargs passed to the matplotlib function 'plot'.
Returns:
matplotlib figure
"""
temperatures = np.linspace(tmin, tmax, ntemp)
mol = "" if self.structure else "-c"
fig = self._plot_thermo(self.dos.cv, temperatures, ylabel="Thermodynamic properties", ylim=ylim,
label=r"$C_v$ (J/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.entropy, temperatures, ylim=ylim, ax=fig.axes[0],
label=r"$S$ (J/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.internal_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,
label=r"$\Delta E$ (kJ/K/mol{})".format(mol), **kwargs)
self._plot_thermo(self.dos.helmholtz_free_energy, temperatures, ylim=ylim, ax=fig.axes[0], factor=1e-3,
label=r"$\Delta F$ (kJ/K/mol{})".format(mol), **kwargs)
fig.axes[0].legend(loc="best")
return fig
| mit |
vicky2135/lucious | oscar/lib/python2.7/site-packages/IPython/core/display.py | 6 | 34087 | # -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
try:
from base64 import encodebytes as base64_encode
except ImportError:
from base64 import encodestring as base64_encode
import json
import mimetypes
import os
import struct
import sys
import warnings
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
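# Hedged usage sketch (added for illustration; the helper name and the dicts are
# assumptions): sub-dicts are merged key by key instead of being replaced
# wholesale, which is how mimetype metadata gets combined below.
def _demo_merge():
    d1 = {'image/png': {'width': 100}, 'text/plain': 'old'}
    d2 = {'image/png': {'height': 50}, 'text/plain': 'new'}
    _merge(d1, d2)
    return d1   # {'image/png': {'width': 100, 'height': 50}, 'text/plain': 'new'}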
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
source : str, deprecated
Unused.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
data=data,
metadata=metadata,
)
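# Minimal usage sketch for publish_display_data: the mimetype-keyed bundle and
# the metadata values below are arbitrary example data, not something defined
# elsewhere in this module.
def _example_publish_display_data():
    """Illustrative only: publish plain-text and HTML representations."""
    bundle = {
        'text/plain': 'x = 3',
        'text/html': '<b>x = 3</b>',
    }
    # metadata is keyed by mimetype, mirroring the docstring above
    publish_display_data(data=bundle, metadata={'text/html': {'example': True}})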
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
if raw:
publish_display_data(data=obj, metadata=metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if not format_dict:
# nothing to display (e.g. _ipython_display_ took over)
continue
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict)
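# Minimal usage sketch for display(): the objects and MIME type names are
# arbitrary examples; only the raw/include keyword semantics come from the
# docstring above.
def _example_display_usage():
    """Illustrative only: formatted vs. raw display calls."""
    # formatted path: let the display formatter compute representations,
    # restricted here to plain text and HTML
    display({'a': 1}, include=['text/plain', 'text/html'])
    # raw path: the caller supplies an already mimetype-keyed dict
    display({'text/html': '<i>precomputed</i>'}, raw=True)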
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
    Note: If raw=False and the object does not have an HTML
representation, no HTML will be shown.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
"""Displays the Markdown representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw markdown data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
_show_mem_addr = False
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
        and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def __repr__(self):
if not self._show_mem_addr:
cls = self.__class__
r = "<%s.%s object>" % (cls.__module__, cls.__name__)
else:
r = super(DisplayObject, self).__repr__()
return r
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Markdown(TextDisplayObject):
def _repr_markdown_(self):
return self.data
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
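# Quick sketch contrasting Math and Latex: Math wraps its input in display-math
# delimiters while Latex passes the source through unchanged.  The formulas are
# arbitrary examples.
def _example_math_vs_latex():
    """Illustrative only: compare the two LaTeX-producing wrappers."""
    m = Math(r'e^{i\pi} + 1 = 0')
    assert m._repr_latex_() == r'$$e^{i\pi} + 1 = 0$$'
    l = Latex(r'\begin{equation}x^2\end{equation}')
    assert l._repr_latex_() == r'\begin{equation}x^2\end{equation}'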
class SVG(DisplayObject):
_read_flags = 'rb'
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, string_types):
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _repr_json_(self):
return self.data
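# Quick sketch of the JSON wrapper: a dict is stored as-is, while an
# already-serialized string is warned about and decoded, as described above.
# The payloads are arbitrary examples.
def _example_json_wrapper():
    """Illustrative only: dict input vs. pre-serialized JSON input."""
    ok = JSON({'answer': 42})
    assert ok._repr_json_() == {'answer': 42}
    warned = JSON('{"answer": 42}')   # emits the warning above, then json.loads
    assert warned.data == {'answer': 42}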
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. Content appended to `element` will be
visible in the output area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
        css : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
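# Quick sketch of _pngxy: only the IHDR chunk matters, so a hand-assembled
# header is enough to exercise it.  The 640x480 size is an arbitrary value.
def _example_pngxy():
    """Illustrative only: recover width/height from a synthetic PNG header."""
    fake_header = (_PNG                            # PNG signature defined above
                   + struct.pack('>I', 13)         # IHDR chunk length
                   + b'IHDR'
                   + struct.pack('>II', 640, 480)) # width, height
    assert _pngxy(fake_header) == (640, 480)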
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=None,
embed=None, width=None, height=None, retina=False,
unconfined=False, metadata=None):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is
            given, the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
            Note that QtConsole is not able to display images if `embed` is set to `False`.
width : int
Width in pixels to which to constrain the image in html
height : int
Height in pixels to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined: bool
Set unconfined=True to disable max-width confinement of the image.
metadata: dict
Specify extra metadata to attach to the image.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if format is None:
if ext is not None:
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
else:
format = ext.lower()
elif isinstance(data, bytes):
# infer image type from image data header,
# only if format has not been specified.
if data[:2] == _JPEG:
format = self._FMT_JPEG
# failed to detect format, default png
if format is None:
format = 'png'
if format.lower() == 'jpg':
# jpg->jpeg
format = self._FMT_JPEG
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.metadata = metadata
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
return u'<img src="{url}"{width}{height}{klass}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.metadata:
md.update(self.metadata)
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
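# Minimal usage sketch for Image, mirroring the constructor docstring: url=
# keeps the data out of the notebook, raw bytes are embedded.  The URL and the
# synthetic PNG header below are placeholder values.
def _example_image_usage():
    """Illustrative only: linked vs. embedded images."""
    linked = Image(url='http://example.com/logo.png')
    assert linked.embed is False        # rendered as an <img> tag pointing at the URL
    png_bytes = _PNG + struct.pack('>I', 13) + b'IHDR' + struct.pack('>II', 1, 1)
    embedded = Image(data=png_bytes, format='png')
    assert embedded.embed is True       # data will be sent to the frontend directly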
class Video(DisplayObject):
def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
"""Create a video object given raw data or an URL.
When this object is returned by an input cell or passed to the
display function, it will result in the video being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw video data or a URL or filename to load the data from.
Raw data will require passing `embed=True`.
url : unicode
A URL for the video. If you specify `url=`,
the image data will not be embedded.
filename : unicode
Path to a local file containing the video.
Will be interpreted as a local URL unless `embed=True`.
embed : bool
Should the video be embedded using a data URI (True) or be
loaded using a <video> tag (False).
Since videos are large, embedding them should be avoided, if possible.
You must confirm embedding as your intention by passing `embed=True`.
Local files can be displayed with URLs without embedding the content, via::
Video('./video.mp4')
mimetype: unicode
Specify the mimetype for embedded videos.
Default will be guessed from file extension, if available.
Examples
--------
Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=True)
Video(b'raw-videodata', embed=True)
"""
if url is None and isinstance(data, string_types) and data.startswith(('http:', 'https:')):
url = data
data = None
        elif data is not None and os.path.exists(data):
filename = data
data = None
if data and not embed:
msg = ''.join([
"To embed videos, you must pass embed=True ",
"(this may make your notebook files huge)\n",
"Consider passing Video(url='...')",
])
raise ValueError(msg)
self.mimetype = mimetype
self.embed = embed
super(Video, self).__init__(data=data, url=url, filename=filename)
def _repr_html_(self):
# External URLs and potentially local files are not embedded into the
# notebook output.
if not self.embed:
url = self.url if self.url is not None else self.filename
output = """<video src="{0}" controls>
Your browser does not support the <code>video</code> element.
</video>""".format(url)
return output
# Embedded videos are base64-encoded.
mimetype = self.mimetype
if self.filename is not None:
if not mimetype:
mimetype, _ = mimetypes.guess_type(self.filename)
with open(self.filename, 'rb') as f:
video = f.read()
else:
video = self.data
if isinstance(video, unicode_type):
# unicode input is already b64-encoded
b64_video = video
else:
b64_video = base64_encode(video).decode('ascii').rstrip()
output = """<video controls>
<source src="data:{0};base64,{1}" type="{0}">
Your browser does not support the video tag.
</video>""".format(mimetype, b64_video)
return output
def reload(self):
# TODO
pass
def _repr_png_(self):
# TODO
pass
def _repr_jpeg_(self):
# TODO
pass
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
print('\033[2K\r', end='')
sys.stdout.flush()
print('\033[2K\r', end='')
sys.stderr.flush()
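# Typical pattern built on clear_output: overwrite the previous output of a
# cell on each iteration.  The loop itself is an arbitrary example.
def _example_progress_loop():
    """Illustrative only: a simple self-updating progress display."""
    for i in range(5):
        clear_output(wait=True)          # defer clearing until new output arrives
        print('step %d of 5' % (i + 1))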
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
# build kwargs, starting with InlineBackend config
kw = {}
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from ipykernel.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/measure_resolution/lmfit-py/doc/sphinx/numpydoc/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
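# Minimal usage sketch for get_doc_object: wrap an ordinary function and render
# its docstring as Sphinx-flavoured reST.  The sample function is arbitrary.
def _example_get_doc_object():
    def add(a, b):
        """Add two numbers."""
        return a + b
    doc = get_doc_object(add)   # callables are dispatched to SphinxFunctionDoc
    return str(doc)             # reST text produced by SphinxDocString.__str__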
| apache-2.0 |
sebalander/sebaPhD | testHomography.py | 2 | 2668 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 30 16:06:58 2016
@author: sebalander
"""
# %%
import cv2
import numpy as np
import scipy as sp
import glob
import matplotlib.pyplot as plt
import poseCalibration as pc
np.random.seed(0)
# %% LOAD DATA
#imagesFolder = "./resources/fishChessboardImg/"
#cornersFile = "/home/sebalander/code/sebaPhD/resources/fishCorners.npy"
#patternFile = "/home/sebalander/code/sebaPhD/resources/chessPattern.npy"
#imgShapeFile = "/home/sebalander/code/sebaPhD/resources/fishShape.npy"
imagesFolder = "./resources/PTZchessboard/zoom 0.0/"
cornersFile = "./resources/PTZchessboard/zoom 0.0/ptzCorners.npy"
patternFile = "./resources/chessPattern.npy"
imgShapeFile = "./resources/ptzImgShape.npy"
corners = np.load(cornersFile).transpose((0,2,1,3))
fiducialPoints = np.load(patternFile)
imgSize = np.load(imgShapeFile)
images = glob.glob(imagesFolder+'*.jpg')
# output files
distCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzDistCoeffs.npy"
linearCoeffsFile = "./resources/PTZchessboard/zoom 0.0/ptzLinearCoeffs.npy"
rvecsFile = "./resources/PTZchessboard/zoom 0.0/ptzRvecs.npy"
tvecsFile = "./resources/PTZchessboard/zoom 0.0/ptzTvecs.npy"
# %%
reload(pc)
# %% use real data
f = 5e2 # proposal of f, can't be estimated from homography
rVecs, tVecs, Hs = pc.estimateInitialPose(fiducialPoints, corners, f, imgSize)
pc.plotHomographyToMatch(fiducialPoints, corners[1:3], f, imgSize, images[1:3])
pc.plotForwardHomography(fiducialPoints, corners[1:3], f, imgSize, Hs[1:3], images[1:3])
pc.plotBackwardHomography(fiducialPoints, corners[1:3], f, imgSize, Hs[1:3])
# %% custom sinthetic homography
# estos valores se ven lindos, podrían ser random tambien
rVec = np.array([[-1.17365947],
[ 1.71987668],
[-0.48076979]])
tVec = np.array([[ 2.53529204],
[ 1.53850073],
[ 1.362088 ]])
pc.fiducialComparison3D(rVec, tVec, fiducialPoints)
H = pc.pose2homogr(rVec, tVec)
# %% produce sinthetic corners (no image to compare though)
f = 1e2
imgSize = np.array([800,600])
src = fiducialPoints[0]+[0,0,1]
dst = np.array([np.dot(H, sr) for sr in src])
dst = np.array([dst[:,0]/dst[:,2],
dst[:,1]/dst[:,2]]).T
dst = f * dst + imgSize/2
# sinthetic corners. always have shape (Nimg,Npts,1,2)
corners = np.reshape(dst,(1,len(dst),1,2))
# %% test on sinthetic data
rVecs, tVecs, Hs = pc.estimateInitialPose(fiducialPoints, corners, f, imgSize)
pc.plotHomographyToMatch(fiducialPoints, corners, f, imgSize)
pc.plotForwardHomography(fiducialPoints, corners, f, imgSize, Hs)
pc.plotBackwardHomography(fiducialPoints, corners, f, imgSize, Hs)
| bsd-3-clause |
nguy/brawl4d | radar/vispy_radar_loop_demo.py | 1 | 12872 | import numpy as np
from collection import RadarFileCollection
from pyart.core.transforms import antenna_vectors_to_cartesian, corner_to_point
from quadmesh_geometry import mesh_from_quads, radar_example_data
from vispy import gloo
import vispy
import vispy.app
# from vispy.scene.widgets import ViewBox
from vispy.scene.visuals import Mesh, Text
from vispy.geometry import MeshData
from vispy.scene import STTransform, ChainTransform, MatrixTransform
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize
import glob
class Canvas(vispy.scene.SceneCanvas):
def __init__(self, size=(800, 800), name="Radar Loop",
timer_interval=1.0,
num_radars=1,
radar_filenames=None,
radar_latlons=None,
radar_fields=None,
time_start=None, time_end=None,
loop_step=10, image_duration=10):
'''
Parameters
----------
size : 2-tuple int
(x, y) size in pixels of window.
name : str
Name to use in window label.
timer_interval : float
Interval at which to update data in window.
num_radars : int
The number of radars to display.
radar_filenames : list
List of radar filenames to process. This can be a list of lists
if multiple radars are desired. num_radars must be > 1.
radar_latlons : list of tuples
List of (latitude, longitude) coordinates. This can be a list
the same length as radar_filenames. num_radars must be > 1.
time_start : datetime instance
Start time to use for subset.
time_end : datetime instance
End time to use for subset.
loop_step : float
Seconds between image update in frame.
image_duration : float
Seconds that each image will last in frame.
'''
# self.vb = scene.widgets.ViewBox(parent=self.scene, border_color='b')
# vb.camera.rect = 0, 0, 1, 1
# self.rotation = MatrixTransform()
# Perform a couple of checks
if radar_filenames is None:
print("Must provide a list of filenames!")
return
if (num_radars > 1) & (len(radar_filenames) != num_radars) & (len(radar_latlons) != num_radars):
print("ERROR: Must provide filenames and lat-lons for each radar!")
return
# Prepare some variables if two radars are chosen
self.radar_filenames = radar_filenames
self.t_start = time_start
self.t_end = time_end
self.rnum = num_radars
self.loop_dt = np.timedelta64(loop_step * 1000000000, 'ns')
self.loop_duration = np.timedelta64(image_duration * 1000000000, 'ns')
# Read in the radar files into a collection
        self.rfc = []
for ii in range(self.rnum):
self.rfc.append(RadarFileCollection(self.radar_filenames[ii]))
## self.rfc = RadarFileCollection(filenames)
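        # NOTE: filenames_88d is a module-level name that is only defined in the
        # __main__ block at the bottom of this file, so constructing Canvas from
        # another module would raise a NameError here.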
self.rfc_88d = RadarFileCollection(filenames_88d)
# Initialize variables for later use
self.dx, self.dy = [], []
if radar_fields is None:
self.radar_fields = ['reflectivity']
else:
self.radar_fields = [radar_fields[0]]
# Find corner points if required
if len(radar_latlons) > 1:
for num in range(1, len(radar_latlons)):
dx_tmp, dy_tmp = corner_to_point(radar_latlons[num], radar_latlons[num-1]) #meters
self.dx.append(dx_tmp)
self.dy.append(dy_tmp)
try:
self.radar_fields.append(radar_fields[num])
except:
self.radar_fields.append('reflectivity')
# Generate dummy data to initialize the Mesh instance
x, y, z, d = radar_example_data()
# print x.shape, y.shape, z.shape
# print d.shape, d.min(), d.max()
mesh = self._init_mesh(x, y, z, d)
mesh_88d = self._init_mesh(x, y, z, d)
# Use colormapping class from matplotlib
self.DZcm = ScalarMappable(norm=Normalize(-25,80), cmap='gist_ncar')
self.VRcm = ScalarMappable(norm=Normalize(-32,32), cmap='PuOr_r')
self.SWcm = ScalarMappable(norm=Normalize(0.0,5.0), cmap='cubehelix_r')
self.radar_mesh = mesh
self.mesh_88d = mesh_88d
self.meshes = (mesh, mesh_88d)
self.rot_view = None
vispy.scene.SceneCanvas.__init__(self, keys='interactive',
title=name, size=size, show=True)
view = self.central_widget.add_view()
view.camera = 'turntable'
view.camera.mode = 'ortho'
view.camera.up = 'z'
view.camera.distance = 20
self.rot_view = view
for a_mesh in self.meshes:
self.rot_view.add(a_mesh)
self.unfreeze() # allow addition of new attributes to the canvas
self.t1 = Text('Time', parent=self.scene, color='white')
self.t1.font_size = 18
self.t1.pos = self.size[0] // 2, self.size[1] // 10
self.loop_reset()
self.timer = vispy.app.Timer(connect=self.loop_radar)
self.timer.start(timer_interval)
def _init_mesh(self, x,y,z,d):
verts, faces = mesh_from_quads(x,y,z)
face_colors = np.empty((faces.shape[0], 4))
face_colors[0::2,0] = d.flat
face_colors[0::2,1] = d.flat
face_colors[0::2,2] = d.flat
face_colors[1::2,0] = d.flat
face_colors[1::2,1] = d.flat
face_colors[1::2,2] = d.flat
face_colors[:,3] = 1.0 # transparency
mdata = MeshData(vertices=verts, faces=faces, face_colors=face_colors)
mesh = Mesh(meshdata=mdata)
# mesh.transform = ChainTransform([STTransform(translate=(0, 0, 0),
# scale=(1.0e-3, 1.0e-3, 1.0e-3) )])
mesh.transform = vispy.scene.transforms.MatrixTransform()
mesh.transform.scale([1./1000, 1./1000, 1./1000])
# mesh.transform.shift([-.2, -.2, -.2])
return mesh
def loop_reset(self):
if self.t_start is not None:
self.loop_start = self.t_start
else:
self.loop_start = np.datetime64(np.min(self.rfc[0].times.values()), 'ns')
if self.t_end is not None:
self.loop_end = self.t_end
else:
self.loop_end = np.datetime64(np.max(self.rfc[0].times.values()), 'ns')
self.loop_current = self.loop_start
def loop_radar(self, event):
current = self.loop_current
last = current
print(current)
self.loop_current = current + self.loop_dt
# ----- Do Ka data -----
# ka_field = 'spectrum_width'
# # ka_field = 'reflectivity'
# r,az,el,t,data = self.rfc.sweep_data_for_time_range(current,
# current+self.loop_duration,
# fieldnames=(ka_field,))
# if r is not None:
# if np.abs(az.mean() - 315.0) > 10:
# az += 90.0
# d = data[ka_field][1:-1, 1:-150]
#
# # print "Found Ka", r.shape, az.shape, el.shape, d.shape
# # print r.min(), r.max(), el.min(), el.max(), az.min(), az.max(), d.min(), d.max()
# verts, faces, face_colors = self._make_plot(r[1:-150], az[1:-1], el[1:-1],
# # d, vmin=-32.0, vmax=25.0, cm=self.DZcm,
# d, vmin=-1.0, vmax=5.0, cm=self.SWcm,
# dx=-dx_ka, dy=-dy_ka)
#
# # print('vert range', verts.min(), verts.max())
#
# self.radar_mesh.set_data(vertices=verts, faces=faces, face_colors=face_colors)
# ----- Do 88D data -----
for ii in range(self.rnum):
r, az, el, t, data = self.rfc[ii].sweep_data_for_time_range(current,
current+self.loop_duration,
                                                                fieldnames=(self.radar_fields[ii],))
if r is not None:
if (el.mean() < 2.0):
d = data[self.radar_fields[ii]][1:-1, 1:300]
# print "Found 88D", r.shape, az.shape, el.shape, d.shape
# print r.min(), r.max(), el.min(), el.max(), az.min(), az.max(), d.min(), d.max()
verts, faces, face_colors = self._make_plot(
r[1:300], az[1:-1], el[1:-1],
d, vmin=-25.0, vmax=80.0, cm=self.DZcm)
# d, vmin=0.0, vmax=0.4, cm=self.SWcm)
# d, vmin=-32.0, vmax=32.0, cm=self.VRcm)
self.mesh_88d.set_data(vertices=verts, faces=faces, face_colors=face_colors)
face_colors[:,3] = 0.5
# ----- Update plot -----
self.t1.text='{0} UTC'.format(current)
# for m in self.meshes:
# m._program._need_build = True
self.update()
if last>self.loop_end:
self.loop_reset()
def _make_plot(self, r, az, el, d, vmin=-32, vmax=70, dx=0.0, dy=0.0, cm=None):
""" Data are normalized using the min of the data array
after replacing missing values with vmin, so vmin should be less
than the minimum data value
"""
x, y, z = antenna_vectors_to_cartesian(r, az, el, edges=True)
x += dx
y += dy
# print(x.shape, y.shape, z.shape, d.shape)
verts, faces = mesh_from_quads(x, y, z)
squashed = d.filled(vmin).flatten()
face_colors = np.empty((faces.shape[0], 4))
if cm is None:
squashed -= squashed.min()
squashed /= (vmax-vmin) # squashed.max()
# print squashed.min(), squashed.max()
# print(face_colors[0::2,0].shape, squashed.shape)
face_colors[0::2, 0] = squashed # d.flat
face_colors[0::2, 1] = squashed # d.flat
face_colors[0::2, 2] = squashed # d.flat
face_colors[1::2, 0] = squashed # d.flat
face_colors[1::2, 1] = squashed # d.flat
face_colors[1::2, 2] = squashed # d.flat
face_colors[:, 3] = 1.0 # transparency
else:
colors = cm.to_rgba(squashed)
face_colors[0::2] = colors
face_colors[1::2] = colors
return verts, faces, face_colors
def on_draw(self, ev):
gloo.set_clear_color('black')
gloo.clear(color=True, depth=True, stencil=True)
if self.rot_view is not None:
self.draw_visual(self.rot_view)
self.draw_visual(self.t1)
# for mesh in self.meshes:
# print mesh
# self.draw_visual(mesh)
if __name__ == '__main__':
#-------------------
# Selection of interesting times
#-------------------
# filenames = glob.glob('/data/20140607/Ka2/Ka2140608031*')#[5:10]
# filenames_88d = glob.glob('/data/20140607/88D/KLBB20140608_031*')
# t_start = np.datetime64('2014-06-08T03:16:29Z', 'ns')
# filenames = glob.glob('/data/20140607/Ka2/Ka2140608033*')#[5:10]
# filenames_88d = glob.glob('/data/20140607/88D/KLBB20140608_033*')
# t_start = np.datetime64('2014-06-08T03:39:05Z', 'ns')
# t_end = t_start
# timer_interval = 10.0
#-------------------
#
#
# filenames = glob.glob('/data/20140607/Ka2/Ka2140608034*')#[5:10]
# filenames_88d = glob.glob('/data/20140607/88D/KLBB20140608_034*')
# t_start = np.datetime64('2014-06-08T03:40:00Z', 'ns')
# t_end = np.datetime64('2014-06-08T03:50:00Z', 'ns')
#-------------------
filenames = glob.glob('/Users/guy/data/test/brawl_vispy/Ka2/Ka2140608031*')#[5:10]
filenames_88d = glob.glob('/Users/guy/data/test/brawl_vispy/88D/KLBB20140608_031*')
## t_start = datetime.datetime(2014,6,8,3,10,0)
## t_end = datetime.datetime(2014,6,8,3,20,0)
t_start = np.datetime64('2014-06-08T03:10:00Z', 'ns')
t_end = np.datetime64('2014-06-08T03:20:00Z', 'ns')
# dloop, dimage = 10, 10
canvas = Canvas(
radar_filenames=[filenames_88d],
radar_latlons=[(33.654140472412109, -101.81416320800781),
(33.73732, -101.84326)],
time_start=t_start, time_end=t_end,
## loop_step=dloop, image_duration=dimage
)
vispy.app.run()
# canvas.radar_mesh.set_data(self, vertices=None, faces=None, vertex_colors=None, face_colors=None, meshdata=None, color=None) | bsd-2-clause |
bhargavvader/pycobra | docs/plot_voronoi_clustering.py | 1 | 3173 | """
Visualising Clustering with Voronoi Tesselations
------------------------------------------------
When experimenting with using the Voronoi Tesselation to identify which
machines are picked up by certain points, it was easy to extend the idea
to visualising clustering through a voronoi.
Using the ``voronoi_finite_polygons_2d`` method from
``pycobra.visualisation``, it's easy to do this
"""
# %matplotlib inline
import numpy as np
from pycobra.cobra import Cobra
from pycobra.visualisation import Visualisation
from pycobra.diagnostics import Diagnostics
import matplotlib.pyplot as plt
from sklearn import cluster
######################################################################
# Let's make some blobs so clustering is easy.
#
from sklearn.datasets.samples_generator import make_blobs
X, Y = make_blobs(n_samples=200, centers=2, n_features=2)
Y = np.power(X[:,0], 2) + np.power(X[:,1], 2)
######################################################################
# We set up a few scikit-learn clustering machines which we'd like to
# visualise the results of.
#
two_means = cluster.KMeans(n_clusters=2)
spectral = cluster.SpectralClustering(n_clusters=2, eigen_solver='arpack', affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.6)
affinity_propagation = cluster.AffinityPropagation(damping=.9, preference=-200)
birch = cluster.Birch(n_clusters=2)
from pycobra.visualisation import voronoi_finite_polygons_2d
from scipy.spatial import Voronoi, voronoi_plot_2d
######################################################################
# Helper function to implement the Voronoi.
#
def plot_cluster_voronoi(data, algo):
# passing input space to set up voronoi regions.
points = np.hstack((np.reshape(data[:,0], (len(data[:,0]), 1)), np.reshape(data[:,1], (len(data[:,1]), 1))))
vor = Voronoi(points)
# use helper Voronoi
regions, vertices = voronoi_finite_polygons_2d(vor)
fig, ax = plt.subplots()
plot = ax.scatter([], [])
indice = 0
for region in regions:
ax.plot(data[:,0][indice], data[:,1][indice], 'ko')
polygon = vertices[region]
        # color each Voronoi cell red or blue according to the point's cluster label
color = algo.labels_[indice]
        # we assume only two clusters
if color == 0:
color = 'r'
else:
color = 'b'
ax.fill(*zip(*polygon), alpha=0.4, color=color, label="")
indice += 1
ax.axis('equal')
plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
two_means.fit(X)
plot_cluster_voronoi(X, two_means)
dbscan.fit(X)
plot_cluster_voronoi(X, dbscan)
spectral.fit(X)
plot_cluster_voronoi(X, spectral)
affinity_propagation.fit(X)
plot_cluster_voronoi(X, affinity_propagation)
birch.fit(X)
plot_cluster_voronoi(X, birch)
######################################################################
# This is just an example of the things you can do with Voronoi
# Tesselations - it's an interesting way to look at your data!
#
# Licensed under the MIT License - https://opensource.org/licenses/MIT
# | mit |
Ensembl/cttv024 | tests/runner.py | 1 | 1852 | #! /usr/bin/env python3
# ------------------------------------------------
# built-ins
import sys
import unittest
import argparse
import os.path
# pipped
import pandas as pd
# ------------------------------------------------
def add_postgap(suite, postgap):
"""
Iterate through suite and construct a new suite where
each test has postgap passed as a keyword arg to it's
constructor.
"""
new_suite = unittest.TestSuite()
for item in suite:
test_class = item.__class__
if test_class == unittest.TestSuite:
new_suite.addTest(add_postgap(item, postgap))
else:
test_name = item._testMethodName
new_suite.addTest(test_class(test_name, postgap))
return new_suite
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='run the postgap unit tests')
parser.add_argument('filename', type=str, help='a postgap output file (gzipped tsv file)')
parser.add_argument('--skip-data-checks', default=False, action='store_true', help='skip the data checks')
parser.add_argument('--skip-health-checks', default=False, action='store_true', help='skip the health checks')
args = parser.parse_args()
loader = unittest.TestLoader()
pattern = 'test*.py'
dir_path = os.path.dirname(os.path.realpath(__file__))
if (args.skip_data_checks):
suite = loader.discover(os.path.join(dir_path, 'tests/health_checks'))
elif (args.skip_health_checks):
suite = loader.discover(os.path.join(dir_path, 'tests/data_checks'))
else:
suite = loader.discover(dir_path)
postgap = pd.read_csv(args.filename, sep='\t', na_values=['None'])
suite_with_postgap = add_postgap(suite, postgap)
result = unittest.TextTestRunner(verbosity=2).run(suite_with_postgap)
sys.exit(not result.wasSuccessful())
| apache-2.0 |
jluttine/bayespy | bayespy/demos/pattern_search.py | 5 | 3944 | ################################################################################
# Copyright (C) 2015 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Demonstration of the pattern search method for PCA.
The pattern searches are compared to standard VB-EM algorithm in CPU
time. For more info on the pattern search method, see
:cite:`Honkela:2002`.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
import bayespy.plot as myplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy import nodes
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
from bayespy.demos import pca
def run(M=40, N=100, D_y=6, D=8, seed=42, rotate=False, maxiter=1000, debug=False, plot=True):
"""
Run pattern search demo for PCA.
"""
if seed is not None:
np.random.seed(seed)
# Generate data
w = np.random.normal(0, 1, size=(M,1,D_y))
x = np.random.normal(0, 1, size=(1,N,D_y))
f = misc.sum_product(w, x, axes_to_sum=[-1])
y = f + np.random.normal(0, 0.2, size=(M,N))
# Construct model
Q = pca.model(M, N, D)
# Data with missing values
mask = random.mask(M, N, p=0.5) # randomly missing
y[~mask] = np.nan
Q['Y'].observe(y, mask=mask)
# Initialize some nodes randomly
Q['X'].initialize_from_random()
Q['W'].initialize_from_random()
# Use a few VB-EM updates at the beginning
Q.update(repeat=10)
Q.save()
# Standard VB-EM as a baseline
Q.update(repeat=maxiter)
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'k-')
# Restore initial state
Q.load()
# Pattern search method for comparison
for n in range(maxiter):
Q.pattern_search('W', 'tau', maxiter=3, collapsed=['X', 'alpha'])
Q.update(repeat=20)
if Q.has_converged():
break
if plot:
bpplt.pyplot.plot(np.cumsum(Q.cputime), Q.L, 'r:')
bpplt.pyplot.xlabel('CPU time (in seconds)')
bpplt.pyplot.ylabel('VB lower bound')
bpplt.pyplot.legend(['VB-EM', 'Pattern search'], loc='lower right')
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["m=",
"n=",
"d=",
"k=",
"seed=",
"maxiter=",
"debug"])
except getopt.GetoptError:
        print('python pattern_search.py <options>')
print('--m=<INT> Dimensionality of data vectors')
print('--n=<INT> Number of data vectors')
print('--d=<INT> Dimensionality of the latent vectors in the model')
print('--k=<INT> Dimensionality of the true latent vectors')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
print('--debug Check that the rotations are implemented correctly')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--rotate":
kwargs["rotate"] = True
elif opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--debug":
kwargs["debug"] = True
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--m",):
kwargs["M"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--d",):
kwargs["D"] = int(arg)
elif opt in ("--k",):
kwargs["D_y"] = int(arg)
run(**kwargs)
plt.show()
| mit |
willsirius/DualTreeRRTStartMotionPlanning | python/userdefined.py | 2 | 9319 | import time
import openravepy
import sys
import numpy as np
from numpy import sin,cos
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# import random
import transformationFunction as tf
import kdtree
import scipy.spatial as spatial
# def
def getpath(tree,goal):
# get the path from a RRT tree
# tree is in dictionary
# path , goal is list
path = [goal]
while 1:
if tree[tuple(path[0])] == tuple(path[0]):
break
path = [list(tree[tuple(path[0])])]+path
return path
def nodesDist(x,y):
return np.linalg.norm(np.asarray(x)-np.asarray(y))
def stepNodes(start,end,step):
# return a list of nodes start from the s to e, with a specific step
l = nodesDist(start,end)
if l <= step:
return [end]
else:
n = int(np.ceil(l/step))
delta = (np.asarray(end)-np.asarray(start))/l*step
nodes = []
for i in range(0,n-1):
nodes.append(list(np.asarray(start)+delta*(i+1)))
nodes.append(end)
return nodes
def step1Node(start,end,step):
# return a list of nodes start from the s to e, with a specific step
l = nodesDist(start,end)
if l <= step:
return end
else:
return list(np.asarray(start)+(np.asarray(end)-np.asarray(start))/l*step)
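# Small worked example of the stepping helpers above; the start/end points and
# the step size are arbitrary values.
def exampleStepNodes():
    # walking 3 units in steps of 1 gives two intermediate points plus the goal
    nodes = stepNodes([0.0, 0.0], [3.0, 0.0], 1.0)   # [[1,0],[2,0],[3,0]]
    # step1Node returns only the first of those waypoints
    first = step1Node([0.0, 0.0], [3.0, 0.0], 1.0)   # [1.0, 0.0]
    return nodes, first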
def plotHist(x):
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='green', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.show()
def limitTo(a,lower,upper):
if a <= lower:
return lower
if a >= upper:
return upper
return a
# sample a anlge from
def sampleCE(workspaceBound = [-4.5,3.5,-2.2,2.2,0.21,1.54]):
x = np.random.uniform(workspaceBound[0],workspaceBound[1])
y = np.random.uniform(workspaceBound[2],workspaceBound[3])
z = np.random.uniform(workspaceBound[4],workspaceBound[5])
q1 = np.random.uniform(0,2*np.pi)
q3 = np.random.uniform(0,2*np.pi)
while 1:
q2 = np.abs(np.random.normal(0,np.pi/4))
if q2 <= np.pi/2:
break
return [x,y,z,q1,q2,q3]
def sampleCQ(workspaceBound = [-4.5,3.5,-2.2,2.2,0.21,1.54]):
x = np.random.uniform(workspaceBound[0],workspaceBound[1])
y = np.random.uniform(workspaceBound[2],workspaceBound[3])
z = np.random.uniform(workspaceBound[4],workspaceBound[5])
q1 = np.random.uniform(0,2*np.pi)
q3 = 0 #np.random.uniform(0,2*np.pi)
while 1:
q2 = np.abs(np.random.normal(0,np.pi/4))
if q2 <= np.pi/2:
break
return [x,y,z] + list(tf.quaternion_from_euler(q1,q2,q3,'rzxz'))
def E2Q(x):
return x[0:3] + list(tf.quaternion_from_euler(x[3],x[4],x[5],'rzxz'))
def Q2R(Q):
# convert a quaternion to a rotation matrix
# input must be a unit quaternion
qw = Q[0]
qx = Q[1]
qy = Q[2]
qz = Q[3]
R = np.array([[1 - 2*qy**2 - 2*qz**2, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
[2*qx*qy + 2*qz*qw, 1 - 2*qx**2 - 2*qz**2, 2*qy*qz - 2*qx*qw],
[2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw ,1 - 2*qx**2 - 2*qy**2]])
return R
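# Sanity-check sketch for Q2R: the identity quaternion [w,x,y,z] = [1,0,0,0]
# maps to the identity matrix, and a 90 degree yaw about z keeps the z axis
# fixed.  The test values are arbitrary.
def exampleQ2R():
    R_id = Q2R([1.0, 0.0, 0.0, 0.0])
    assert np.allclose(R_id, np.eye(3))
    R_yaw = Q2R([np.cos(np.pi/4), 0.0, 0.0, np.sin(np.pi/4)])  # 90 deg about z
    assert np.allclose(np.dot(R_yaw, [0.0, 0.0, 1.0]), [0.0, 0.0, 1.0])
    return R_id, R_yaw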
def genCQ(x,y,z,q1,q2,q3):
# generate a quaternion by parameters
sq32 = sin(q3/2)
sq1 = sin(q1)
print sq32
print sq1
return [x,y,z,cos(q3/2),sq32*sq1*cos(q2),sq32*sq1*sin(q2),sq32*cos(q1)]
def hat(v):
# hat map of a vector
# input an numpy array or list, output an numpy array
return np.array([[0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0]])
def cross(a, b):
c = np.array([[a[1]*b[2] - a[2]*b[1]],
[a[2]*b[0] - a[0]*b[2]],
[a[0]*b[1] - a[1]*b[0]]])
return c
def updateState(s1,u,ts):
# update state x1 to x2 with control input u and time step ts
# s uses position vector and quaternion to represent
# s = [x,v,Q,W] Q is the position,velocity, attitude quaternion ad angular velocity
# the quaternion are translated to a rotation matrix for computation
# then the rotatoin matrix is converted to quaternion before return
# input and output are both lists
# u rotation speed of each motor
# a accelatation in inertial frame
# x position in inertial frame
# v velocity in inertial frame
# Q rotation quaternion of the body in the inertial frame
# W angular velocity in the body frame
# M moment vector in the body fixed frame
# m total mass of the drone
# Rd the derivetive of rotation matrix
# J inertia matrix
# ctf constant to convert force to torque: f*ctf = t
# MV moment vector f,mx,my,mz
J = np.array([[0.04,0,0],
[0,0.04,0],
[0,0,0.07]])
Jinv = np.array([[ 25. , 0. , 0. ],
[ 0. , 25. , 0. ],
[ 0. , 0. , 14.28571429]])
m = 1.85
d = 0.2
ctf = 0.008
g = 9.8
e3 = np.array([0,0,1])
MV = np.matmul(np.array([[1,1,1,1],[0,-d,0,d],[d,0,-d,0],[-ctf,ctf,-ctf,ctf]]),np.array([u[0],u[1],u[2],u[3]]))
f = MV[0]
M = MV[[1,2,3]]
x1 = np.array(s1[0:3])
v1 = np.array(s1[3:6])
Q1 = np.array(s1[6:10])
W1 = np.array(s1[10:13])
R1 = Q2R(Q1)
R1d = np.matmul(R1,hat(W1))
a = - g*e3+(f*np.matmul(R1,e3))/m
W1d = np.matmul( Jinv, M - np.cross(W1,np.matmul(J,W1)))
x2 = x1 + ts*v1
v2 = v1 + ts*a
R2 = R1 + ts*R1d
W2 = W1 + ts*W1d
R2t = np.identity(4)
R2t[0:3,0:3] = R2
Q2 = tf.quaternion_from_matrix(R2t)
s2 = list(x2)+list(v2)+list(Q2)+list(W2)
return s2
# print "test update state"
# s2 = [0,0,0,0,0,0,1,0,0,0,0,0,0]
# # s1 = [1,1,1,1,0,0,0,0.2,0.2,0.2,0.1,0.1,-0.1]
# u = [0,0,0,0]
# ts = 0.02
# t = range(0,100)
# for tt in t:
# s2 = updateState(s2,u,ts)
# x1 = np.array(s2[0:3])
# v1 = np.array(s2[3:6])
# Q1 = np.array(s2[6:10])
# W1 = np.array(s2[10:13])
# E1 = tf.euler_from_quaternion(Q1)
# print x1
# print v1
# print Q1
# print W1
# axarr[0, 0].plot(x, y)
# axarr[0, 0].set_title('Axis [0,0]')
# axarr[0, 1].scatter(x, y)
# axarr[0, 1].set_title('Axis [0,1]')
# axarr[1, 0].plot(x, y ** 2)
# axarr[1, 0].set_title('Axis [1,0]')
# axarr[1, 1].scatter(x, y ** 2)
# axarr[1, 1].set_title('Axis [1,1]')
# # Fine-tune figure; hide x ticks for top plots and y ticks for right plots
# plt.setp([a.get_xticklabels() for a in axarr[0, :]], visible=False)
# plt.setp([a.get_yticklabels() for a in axarr[:, 1]], visible=False)
# q = [1,0,0,0]
# q0 = tf.random_quaternion()
# r0 = Q2R(q0)
# print hat([1,2,3])
# print tf.euler_from_matrix(r0)
# print tf.euler_from_quaternion(q0)
# print hat([1,2,3])
# print [1,2,3,4][3]
# v = [1,2,3]
# np.array([0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0])
# print sampleRotation()
# # print np.random.normal(0, 3.14, 1)
# eM = tf.euler_matrix(0,0,1.57)
# print eM
# print np.random.uniform(0,3)
# # print 1
# print tf.random_rotation_matrix()
# print np.dot(tf.random_quaternion(),tf.random_quaternion())
# print np.matmul(tf.random_rotation_matrix(),tf.random_rotation_matrix())
# start = tf.random_quaternion();
# print start
# print tuple(start)
# a = {tuple(start):tuple(start)}
# print a
# print a[tuple(start)]
# x = [sampleC()];
# KDtree = kdtree.create(x)
# print x
# for i in range(0,200):
# # x.append(sampleC()[5])
# newnode =sampleC()
# x.append(newnode)
# KDtree.add(newnode)
# # print x
# kdtree.visualize(KDtree)
# node = sampleC()
# print node
# a = KDtree.search_nn(node)[0].data
# print a
# aa = 1000
# for i in x:
# # print "this is i"
# # print np.asarray(i)
# # print type(np.asarray(i))
# # print np.linalg.norm(np.asarray(i),np.asarray(i))
# aa = min(aa,np.linalg.norm(np.asarray(i)-np.asarray(node)))
# print aa
# print np.linalg.norm(np.asarray(a)-np.asarray(node))
# print nodesDist(1,3)
# print nodesDist([1,2,3],[4,5,6])
# print np.power(nodesDist([[2,3,4],[2,3,4]],[[1,2,3],[1,2,3]]),2)
# print np.asarray([[2,3,4],[2,3,4]])
# print np.floor(3.4)
# yy = [];
# yy.append([1,2,3])
# yy.append([1,2,5])
# print yy
# print ""
# print step1Node([30,40],[0,0.1],5)
# a = {(2,3):(1,2),(1,2):(1,2),(3,4):(1,2),(5,6):(3,4),(9,8):(3,4)};
# print a
# print getpath(a,[5,6])
# print ""
# points = np.array([ (3, 4), (1, 2),(4, 5),(6,7),(2,5),(2,4)])
# points = [[1,2],[4,5],[5,2]]
# point_tree = spatial.KDTree(points)
# This finds the index of all points within distance 1 of [1.5,2.5].
# print(point_tree.query_ball_point([1.5, 2.5], 2))
# print point_tree.query([1.5, 2.5])
# print point_tree.data[point_tree.query([1.5, 2.5])[1]]
# [0]
# # This gives the point in the KDTree which is within 1 unit of [1.5, 2.5]
# print(point_tree.data[point_tree.query_ball_point([1.5, 2.5], 1)])
# # [[1 2]]
# # More than one point is within 3 units of [1.5, 1.6].
# print(point_tree.data[point_tree.query_ball_point([1.5, 1.6], 3)])
# # [[1 2]
# # [3 4]]
# x = []
# for i in range(0,1000):
# while 1:
# q1 = np.random.normal(np.pi/4,np.pi/8)
# if np.abs(q1-np.pi/4) <= np.pi/4:
# break
# x.append(q1)
# plotHist(x)
# startconfig = [ 4.0,-1.5 ,0.2 ,1 ,0.0, 0.0, 0.0 ]
# print E2Q(startconfig) | mit |
Ossada/DLS-UVVis | slider.py | 1 | 1207 | __author__ = 'vid'
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t,s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.1, 0.1, 0.65, 0.03], axisbg=axcolor)
axamp = plt.axes([0.25, 0.2, 0.65, 0.03], axisbg=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sfreq.reset()
samp.reset()
button.on_clicked(reset)
# rax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)
# radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
# def colorfunc(label):
# l.set_color(label)
# fig.canvas.draw_idle()
# radio.on_clicked(colorfunc)
plt.show() | mit |
microelly2/reconstruction | reconstruction/say.py | 1 | 1170 |
import FreeCAD
import FreeCADGui
App=FreeCAD
Gui=FreeCADGui
import PySide
from PySide import QtCore, QtGui
import FreeCAD
import Draft, Part, Animation
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import os,random,time,sys,traceback
def log(s):
logon = False
if logon:
f = open('/tmp/log.txt', 'a')
f.write(str(s) +'\n')
f.close()
def sayd(s):
if hasattr(FreeCAD,'animation_debug'):
pass
log(str(s))
FreeCAD.Console.PrintMessage(str(s)+"\n")
def say(s):
log(str(s))
FreeCAD.Console.PrintMessage(str(s)+"\n")
def sayErr(s):
log(str(s))
FreeCAD.Console.PrintError(str(s)+"\n")
def sayW(s):
log(str(s))
FreeCAD.Console.PrintWarning(str(s)+"\n")
def errorDialog(msg):
diag = QtGui.QMessageBox(QtGui.QMessageBox.Critical,u"Error Message",msg )
diag.setWindowFlags(PySide.QtCore.Qt.WindowStaysOnTopHint)
diag.exec_()
def sayexc(mess=''):
exc_type, exc_value, exc_traceback = sys.exc_info()
ttt=repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
lls=eval(ttt)
l=len(lls)
l2=lls[(l-3):]
FreeCAD.Console.PrintError(mess + "\n" +"--> ".join(l2))
| lgpl-3.0 |
fengzhyuan/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities: some are over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
jreback/pandas | pandas/tests/extension/test_string.py | 1 | 3885 | import string
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas.core.arrays.string_ import StringDtype
from pandas.core.arrays.string_arrow import ArrowStringDtype
from pandas.tests.extension import base
@pytest.fixture(
params=[
StringDtype,
pytest.param(
ArrowStringDtype, marks=td.skip_if_no("pyarrow", min_version="1.0.0")
),
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
strings = np.random.choice(list(string.ascii_letters), size=100)
while strings[0] == strings[1]:
strings = np.random.choice(list(string.ascii_letters), size=100)
return dtype.construct_array_type()._from_sequence(strings)
@pytest.fixture
def data_missing(dtype):
"""Length 2 array with [NA, Valid]"""
return dtype.construct_array_type()._from_sequence([pd.NA, "A"])
@pytest.fixture
def data_for_sorting(dtype):
return dtype.construct_array_type()._from_sequence(["B", "C", "A"])
@pytest.fixture
def data_missing_for_sorting(dtype):
return dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"])
@pytest.fixture
def na_value():
return pd.NA
@pytest.fixture
def data_for_grouping(dtype):
return dtype.construct_array_type()._from_sequence(
["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"]
)
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
def test_view(self, data, request):
if isinstance(data.dtype, ArrowStringDtype):
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_view(data)
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
def test_transpose(self, data, dtype, request):
if isinstance(dtype, ArrowStringDtype):
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_transpose(data)
class TestGetitem(base.BaseGetitemTests):
pass
class TestSetitem(base.BaseSetitemTests):
def test_setitem_preserves_views(self, data, dtype, request):
if isinstance(dtype, ArrowStringDtype):
mark = pytest.mark.xfail(reason="not implemented")
request.node.add_marker(mark)
super().test_setitem_preserves_views(data)
class TestMissing(base.BaseMissingTests):
pass
class TestNoReduce(base.BaseNoReduceTests):
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
op_name = all_numeric_reductions
if op_name in ["min", "max"]:
return None
s = pd.Series(data)
with pytest.raises(TypeError):
getattr(s, op_name)(skipna=skipna)
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="returns nullable")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
@pytest.mark.skip(reason="returns nullable")
def test_value_counts_with_normalize(self, data):
pass
class TestCasting(base.BaseCastingTests):
pass
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
result = getattr(s, op_name)(other)
expected = getattr(s.astype(object), op_name)(other).astype("boolean")
self.assert_series_equal(result, expected)
def test_compare_scalar(self, data, all_compare_operators):
op_name = all_compare_operators
s = pd.Series(data)
self._compare_other(s, data, op_name, "abc")
class TestParsing(base.BaseParsingTests):
pass
class TestPrinting(base.BasePrintingTests):
pass
class TestGroupBy(base.BaseGroupbyTests):
pass
| bsd-3-clause |
pyspeckit/pyspeckit | scripts/pyspeckit_script.py | 7 | 4227 | #!/bin/env ipython -i --matplotlib
"""
pyspeckit command line startup script
"""
from __future__ import print_function
import sys
# remove script file's parent directory from path
# (otherwise, can't import pyspeckit)
#sys.path.pop(0)
from pyspeckit.spectrum.classes import Spectrum, Spectra
from pyspeckit.cubes.SpectralCube import Cube, CubeStack
from pyspeckit import wrappers as pw
import optparse
import os
import re
if __name__ == "__main__":
import matplotlib
import itertools
import pylab
parser=optparse.OptionParser()
parser.add_option("--verbose","-v",help="Be loud? Default True",default=False,action='store_true')
parser.add_option("--debug","-d",help="Debug mode. Default False",default=False,action='store_true')
parser.add_option("--doplot","-p",help="Plot? Default True",default=True)
parser.add_option("--fitgaussian",help="Fit a gaussian?",default=False,action='store_true')
parser.add_option("--fitnh3",help="Fit NH3?",default=False,action='store_true')
parser.add_option("--threed",'--3d','--cube',help="Data cube?",default=False,action='store_true')
parser.add_option("--filetype",help="File type to use.", default=None)
parser.add_option("--smooth",help="Smooth the spectrum (by how much)?",default=False)
parser.add_option("--wcstype",help="What wcstype to use? Can be a list: A,B,C,T,V where elements correspond to input spectra",default=None)
parser.add_option("--specnum",help="What specnum?",default=0)
parser.add_option("--hdu",help="What HDU number?",default=None)
parser.add_option("--unmerged",help="[tspec only] Is the tspec file NOT merged?",default=False,action='store_true')
options,args = parser.parse_args()
verbose = options.verbose
if verbose:
print("Args: ",args)
print("Options: ",options)
if options.debug:
print("DEBUG MODE. Using a different excepthook.")
def info(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, pdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
print()
# ...then start the debugger in post-mortem mode.
pdb.pm()
sys.excepthook = info
specnum = int(options.specnum)
if options.wcstype:
if "," in options.wcstype:
wcstype = options.wcstype.split(",")
else:
wcstype = options.wcstype
else:
wcstype = ''
# specify kwargs before passing both for brevity and to allow
# for some kwargs to not be specified at all
kwargs = {'specnum':specnum,
'wcstype':wcstype,
'verbose':verbose,
'filetype':options.filetype}
if options.hdu is not None:
kwargs['hdu'] = int(options.hdu)
if len(args) > 1:
if options.threed:
cubelist = [Cube(fname) for fname in args]
splist = cubelist
cube = CubeStack(cubelist)
options.doplot = False
elif len(wcstype) == len(args):
splist = [Spectrum(a, **kwargs) for a, w in
zip(args, wcstype)]
sp = Spectra(splist)
else:
splist = [Spectrum(a,**kwargs) for a in args]
sp = Spectra(splist)
linestyles = itertools.cycle(["steps-mid","steps-mid--"])
colors = itertools.cycle(matplotlib.cm.spectral(pylab.linspace(0,1,len(splist))))
else:
if len(wcstype)==1:
sp = Spectrum(*args,**kwargs)
else:
if options.threed:
cube = Cube(*args)
options.doplot = False
else:
sp = Spectrum(*args,**kwargs)
if options.smooth > 0: sp.smooth(float(options.smooth))
if options.doplot: sp.plotter()
if options.fitgaussian:
sp.specfit()
if options.fitnh3:
pw.fitnh3.fitnh3(sp)
import IPython
IPython.embed()
| mit |
TREE-Edu/speaker-rec-skill-test | data/remove-silence.py | 1 | 2752 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# $File: VAD.py
# $Date: Thu Dec 26 15:33:37 2013 +0800
# $Author: Xinyu Zhou <zxytim[at]gmail[dot]com>
import sys
import os
import glob
import scipy.io.wavfile as wavfile
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing
def mkdirp(dirname):
try:
os.makedirs(dirname)
except OSError as err:
if err.errno!=17:
raise
def remove_silence(fs, signal,
frame_duration = 0.02,
frame_shift = 0.01,
perc = 0.01):
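    # (Sketch of the idea, inferred from the code below: frame-based energy VAD.
    #  Slide a window of `frame_duration` seconds in steps of `frame_shift` seconds
    #  and keep only frames whose mean energy is at least `perc` times the average
    #  energy of the whole signal.)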
orig_dtype = type(signal[0])
typeinfo = np.iinfo(orig_dtype)
is_unsigned = typeinfo.min >= 0
signal = signal.astype(np.int64)
if is_unsigned:
signal = signal - typeinfo.max / 2
siglen = len(signal)
retsig = np.zeros(siglen, dtype = np.int64)
frame_length = frame_duration * fs
frame_shift_length = frame_shift * fs
new_siglen = 0
i = 0
    # NOTE: squaring a numpy array in a small unsigned dtype overflows and
    # wraps modulo 2**bits; e.g. for dtype uint8,
    # [128, 127, 129] ** 2 = [0, 1, 1]
    # That is why the signal is cast to np.int64 above, so the energy
    # computed below is correct.
average_energy = np.sum(signal ** 2) / float(siglen)
while i < siglen:
subsig = signal[i:i + frame_length]
ave_energy = np.sum(subsig ** 2) / float(len(subsig))
if ave_energy < average_energy * perc:
i += frame_length
else:
sigaddlen = min(frame_shift_length, len(subsig))
retsig[new_siglen:new_siglen + sigaddlen] = subsig[:sigaddlen]
new_siglen += sigaddlen
i += frame_shift_length
retsig = retsig[:new_siglen]
if is_unsigned:
retsig = retsig + typeinfo.max / 2
return fs, retsig.astype(orig_dtype)
def task(fpath, new_fpath):
fs, signal = wavfile.read(fpath)
fs_out, signal_out = remove_silence(fs, signal)
wavfile.write(new_fpath, fs_out, signal_out)
return fpath
def main():
if len(sys.argv) != 3:
print("Usage: {} <orignal_dir> <output_dir>" . format(sys.argv[0]))
sys.exit(1)
ORIG_DIR, OUTPUT_DIR = sys.argv[1:]
pool = multiprocessing.Pool(4)
result = []
for style in glob.glob(os.path.join(ORIG_DIR, '*')):
dirname = os.path.basename(style)
for fpath in glob.glob(os.path.join(style, '*.wav')):
fname = os.path.basename(fpath)
new_fpath = os.path.join(OUTPUT_DIR, dirname, fname)
mkdirp(os.path.dirname(new_fpath))
result.append(pool.apply_async(task, args = (fpath, new_fpath)))
pool.close()
for r in result:
print(r.get())
if __name__ == '__main__':
main()
# vim: foldmethod=marker
| apache-2.0 |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/svm/tests/test_bounds.py | 42 | 2112 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
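    # l1_min_c gives the smallest C for which an L1-penalized model can carry a
    # non-zero coefficient; the assertions below check both sides of that bound:
    # at exactly min_c every coefficient and intercept is still zero, while at
    # 1.01 * min_c at least one parameter becomes non-zero.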
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| apache-2.0 |
WindCanDie/spark | python/pyspark/sql/session.py | 3 | 37286 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
xrange = range
else:
from itertools import izip as zip, imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, TimestampType, \
_make_type_verifier, _infer_schema, _has_nulltype, _merge_type, _create_converter, \
_parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
.. autoattribute:: builder
:annotation:
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
_sc = None
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
def _sparkContext(self, sc):
with self._lock:
self._sc = sc
return self
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
            This method first checks whether there is a valid global default SparkSession, and if
            yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
if self._sc is not None:
sc = self._sc
else:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
# This SparkContext may be an existing one.
sc = SparkContext.getOrCreate(sparkConf)
# Do not update `SparkConf` for existing `SparkContext`, as it's shared
# by all sessions.
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
return session
builder = Builder()
"""A class attribute having a :class:`Builder` to construct :class:`SparkSession` instances"""
_instantiatedSession = None
_activeSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
if self._jvm.SparkSession.getDefaultSession().isDefined() \
and not self._jvm.SparkSession.getDefaultSession().get() \
.sparkContext().isStopped():
jsparkSession = self._jvm.SparkSession.getDefaultSession().get()
else:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
SparkSession._activeSession = self
self._jvm.SparkSession.setDefaultSession(self._jsparkSession)
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
def _repr_html_(self):
return """
<div>
<p><b>SparkSession - {catalogImplementation}</b></p>
{sc_HTML}
</div>
""".format(
catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
sc_HTML=self.sparkContext._repr_html_()
)
@since(2.0)
def newSession(self):
"""
        Returns a new SparkSession as a new session, which has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@classmethod
@since(3.0)
def getActiveSession(cls):
"""
Returns the active SparkSession for the current thread, returned by the builder.
>>> s = SparkSession.getActiveSession()
>>> l = [('Alice', 1)]
>>> rdd = s.sparkContext.parallelize(l)
>>> df = s.createDataFrame(rdd, ['name', 'age'])
>>> df.select("age").collect()
[Row(age=1)]
"""
from pyspark import SparkContext
sc = SparkContext._active_spark_context
if sc is None:
return None
else:
if sc._jvm.SparkSession.getActiveSession().isDefined():
SparkSession(sc, sc._jvm.SparkSession.getActiveSession().get())
return SparkSession._activeSession
else:
return None
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
:return: :class:`Catalog`
"""
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.udf import UDFRegistration
return UDFRegistration(self)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
        Create a :class:`DataFrame` with a single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data, names=None):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:param names: list of column names
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None, names=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first, names=names)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row, names=names))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio, names=schema)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data could consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data, names=schema)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
def _get_numpy_record_dtype(self, rec):
"""
Used when converting a pandas.DataFrame to Spark using to_records(), this will correct
the dtypes of fields in a record so they can be properly loaded into Spark.
:param rec: a numpy record to check field dtypes
        :return: corrected dtype for a numpy.record or None if no correction needed
"""
import numpy as np
cur_dtypes = rec.dtype
col_names = cur_dtypes.names
record_type_list = []
has_rec_fix = False
for i in xrange(len(cur_dtypes)):
curr_type = cur_dtypes[i]
# If type is a datetime64 timestamp, convert to microseconds
# NOTE: if dtype is datetime[ns] then np.record.tolist() will output values as longs,
# conversion from [us] or lower will lead to py datetime objects, see SPARK-22417
if curr_type == np.dtype('datetime64[ns]'):
curr_type = 'datetime64[us]'
has_rec_fix = True
record_type_list.append((str(col_names[i]), curr_type))
return np.dtype(record_type_list) if has_rec_fix else None
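        # (Illustrative effect: a record dtype such as
        #  [('ts', 'datetime64[ns]'), ('x', '<i8')] is corrected to
        #  [('ts', 'datetime64[us]'), ('x', '<i8')], so tolist() later yields
        #  datetime objects rather than raw longs.)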
def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
        :return: list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records]
def _create_from_pandas_with_arrow(self, pdf, schema, timezone):
"""
Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting
to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the
data types will be used to coerce the data in Pandas to Arrow conversion.
"""
from pyspark.serializers import ArrowStreamSerializer, _create_batch
from pyspark.sql.types import from_arrow_schema, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
# Any timestamps must be coerced to be compatible with Spark
arrow_types = [to_arrow_type(TimestampType())
if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
for t in pdf.dtypes]
# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism) # round int up
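        # (e.g. len(pdf) == 7 with defaultParallelism == 3 gives -(-7 // 3) == 3;
        #  the double negation is just integer ceiling division)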
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))
# Create Arrow record batches
safecheck = self._wrapped._conf.arrowSafeTypeConversion()
batches = [_create_batch([(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)],
timezone, safecheck)
for pdf_slice in pdf_slices]
# Create the Spark schema from the first Arrow batch (always at least 1 batch after slicing)
if isinstance(schema, (list, tuple)):
struct = from_arrow_schema(batches[0].schema)
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
jsqlContext = self._wrapped._jsqlContext
def reader_func(temp_filename):
return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)
def create_RDD_server():
return self._jvm.ArrowRDDServer(jsqlContext)
# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(batches, ArrowStreamSerializer(), reader_func,
create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@staticmethod
def _create_shell_session():
"""
Initialize a SparkSession for a pyspark shell session. This is called from shell.py
to make error handling simpler without needing to declare local variables in that
script, which would expose those to users.
"""
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
# Try to access HiveConf, it will raise exception if Hive is not added
conf = SparkConf()
if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
return SparkSession.builder\
.enableHiveSupport()\
.getOrCreate()
else:
return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
"please make sure you build spark with hive")
return SparkSession.builder.getOrCreate()
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.1
Added verifySchema.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
SparkSession._activeSession = self
self._jvm.SparkSession.setActiveSession(self._jsparkSession)
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
elif isinstance(schema, (list, tuple)):
# Must re-encode any unicode strings to be consistent with StructField names
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
if self._wrapped._conf.pandasRespectSessionTimeZone():
timezone = self._wrapped._conf.sessionLocalTimeZone()
else:
timezone = None
# If no schema supplied by user then get the names of columns only
if schema is None:
schema = [str(x) if not isinstance(x, basestring) else
(x.encode('utf-8') if not isinstance(x, str) else x)
for x in data.columns]
if self._wrapped._conf.arrowEnabled() and len(data) > 0:
try:
return self._create_from_pandas_with_arrow(data, schema, timezone)
except Exception as e:
from pyspark.util import _exception_message
if self._wrapped._conf.arrowFallbackEnabled():
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
else:
msg = (
"createDataFrame attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
data = self._convert_from_pandas(data, schema, timezone)
if isinstance(schema, StructType):
verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
verify_func = _make_type_verifier(
dataType, name="field value") if verifySchema else lambda _: True
def prepare(obj):
verify_func(obj)
return obj,
else:
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
# We should clean the default session up. See SPARK-23228.
self._jvm.SparkSession.clearDefaultSession()
self._jvm.SparkSession.clearActiveSession()
SparkSession._instantiatedSession = None
SparkSession._activeSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
Stanford-Online/edx-analytics-pipeline | edx/analytics/tasks/tests/acceptance/test_lms_courseware_link_clicked.py | 1 | 2697 | """
End-to-end test of the workflow to load the warehouse's lms_courseware_link_clicked_events table.
"""
import datetime
import logging
import os
import pandas
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase, when_vertica_available
log = logging.getLogger(__name__)
class LmsCoursewareLinkClickedAcceptanceTest(AcceptanceTestCase):
"""
Runs the MapReduce job that uploads LMS courseware link click data to Vertica, then queries that data
and compares it to the expected output.
"""
INPUT_FILE = 'lms_courseware_link_clicked_acceptance_tracking.log'
DATE = datetime.date(2016, 6, 13)
@when_vertica_available
def test_lms_courseware_link_clicked(self):
"""Tests the workflow for the lms_courseware_link_clicked_events table, end to end."""
self.upload_tracking_log(self.INPUT_FILE, self.DATE)
self.task.launch([
'PushToVerticaLMSCoursewareLinkClickedTask',
'--output-root', self.test_out,
'--interval', str(2016),
'--n-reduce-tasks', str(self.NUM_REDUCERS)
])
self.validate_output()
def validate_output(self):
"""Validates the output, comparing it to a csv of all the expected output from this workflow."""
with self.vertica.cursor() as cursor:
expected_output_csv = os.path.join(
self.data_dir,
'output',
'acceptance_expected_lms_courseware_link_clicked_events.csv'
)
def convert_date(date_string):
"""Convert date string to a date object."""
return datetime.datetime.strptime(date_string, '%Y-%m-%d').date()
expected = pandas.read_csv(expected_output_csv, converters={'event_date': convert_date})
cursor.execute(
"SELECT * FROM {schema}.lms_courseware_link_clicked_events ORDER BY course_id, event_date"
.format(schema=self.vertica.schema_name)
)
response = cursor.fetchall()
lms_courseware_link_clicked_events = pandas.DataFrame(
response,
columns=[
'record_number',
'course_id',
'event_date',
'external_link_clicked_events',
'link_clicked_events'
]
)
for frame in (lms_courseware_link_clicked_events, expected):
frame.sort(['record_number'], inplace=True, ascending=[True])
frame.reset_index(drop=True, inplace=True)
self.assert_data_frames_equal(lms_courseware_link_clicked_events, expected)
| agpl-3.0 |
ahoarfrost/metaseek | server/scrapers/SRA/SRA_scrape.py | 1 | 11674 | # -*- encoding: utf-8 -*-
#test adding runs to db
import sys
sys.path.append('../..')
sys.path.append('..')
from app import db
from pymysql import err
from sqlalchemy import exc
from SRA_scrape_fns import *
from models import *
from shared import *
from sklearn.externals import joblib
metaseek_fields = ['db_source_uid',
'db_source',
'expt_link',
'expt_id',
'expt_title',
'expt_design_description',
'library_name',
'library_strategy',
'library_source',
'library_screening_strategy',
'library_construction_method',
'library_construction_protocol',
'sequencing_method',
'instrument_model',
'submission_id',
'organization_name',
'organization_address',
'organization_contacts',
'study_id',
'bioproject_id',
'study_title',
'study_type',
'study_type_other',
'study_abstract',
'study_links',
'study_attributes',
'sample_id',
'biosample_id',
'sample_title',
'ncbi_taxon_id',
'taxon_scientific_name',
'taxon_common_name',
'sample_description',
'num_runs_in_accession',
'run_ids_maxrun',
'library_reads_sequenced_maxrun',
'total_num_bases_maxrun',
'download_size_maxrun',
'avg_read_length_maxrun',
'baseA_count_maxrun',
'baseC_count_maxrun',
'baseG_count_maxrun',
'baseT_count_maxrun',
'baseN_count_maxrun',
'gc_percent_maxrun',
'run_quality_counts_maxrun',
'biosample_uid',
'biosample_link',
'metadata_publication_date',
'biosample_package',
'biosample_models',
'sample_attributes',
'investigation_type',
'env_package',
'project_name',
'lat_lon',
'latitude',
'longitude',
'meta_latitude',
'meta_longitude',
'geo_loc_name',
'collection_date',
'collection_time',
'env_biome',
'env_feature',
'env_material',
'depth',
'elevation',
'altitude',
'target_gene',
'target_subfragment',
'ploidy',
'num_replicons',
'estimated_size',
'ref_biomaterial',
'propagation',
'assembly',
'finishing_strategy',
'isol_growth_condt',
'experimental_factor',
'specific_host',
'subspecific_genetic_lineage',
'tissue',
'sex',
'sample_type',
'age',
'dev_stage',
'biomaterial_provider',
'host_disease',
'date_scraped',
'metaseek_investigation_type',
'metaseek_investigation_type_P',
'metaseek_mixs_specification',
'metaseek_mixs_specification_P',
'metaseek_env_package',
'metaseek_sequencing_method']
run_fields = ['dataset_id',
'run_id',
'library_reads_sequenced',
'total_num_bases',
'download_size',
'avg_read_length',
'baseA_count',
'baseC_count',
'baseG_count',
'baseT_count',
'baseN_count',
'gc_percent',
'run_quality_counts']
pubmed_fields = ['pubmed_uid',
'pubmed_link',
'pub_publication_date',
'pub_authors',
'pub_title',
'pub_volume',
'pub_issue',
'pub_pages',
'pub_journal',
'pub_doi',
'datasets']
if __name__ == "__main__":
#make list of all publicly available UIDs in SRA
retstart_list = get_retstart_list(url='https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=sra&term=public&field=ACS&rettype=count&tool=metaseq&email=metaseekcloud%40gmail.com')
uid_list = get_uid_list(ret_list=retstart_list)
#remove SRA IDs that have already been ingested into MetaSeek DB; db_source_uids for which 'source_db' is 'SRA'
print "Removing uids already in MetaSeek..."
result = db.session.query(Dataset.db_source_uid).filter(Dataset.db_source=='SRA').distinct()
existing_uids = [r.db_source_uid for r in result]
#subtract any uids already in db from uid_list
uids_to_scrape = list(set(uid_list)-set(existing_uids))
print "...REMAINING NUMBER OF UIDS TO SCRAPE: %s" % (len(uids_to_scrape))
#split UIDs to scrape into batches of 500 (max number of UIDs can call with eutilities api at one time)
batches = get_batches(uids_to_scrape, batch_size=200)
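    # get_batches comes from SRA_scrape_fns; judging from its use below it returns
    # a list of (start, end) index pairs into uids_to_scrape, e.g.
    # [(0, 200), (200, 400), ...], which are consumed as slice bounds.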
#for each batch of 500 UIDs, scrape metadata
for batch_ix,batch in enumerate(batches):
print "PROCESSING BATCH %s OUT OF %s......" % (batch_ix+1,len(batches))
batch_uid_list = map(int,uids_to_scrape[batch[0]:batch[1]])
print "-%s UIDs to scrape in this batch...... %s..." % (len(batch_uid_list),batch_uid_list[0:10])
#scrape sra metadata, return as dictionary of dictionaries; each sdict key is the SRA UID, value is a dictionary of srx metadata key/value pairs; rdict is for individual runs with key run_id
print "-scraping SRX metadata..."
try:
sdict, rdict = get_srx_metadata(batch_uid_list=batch_uid_list)
except (EutilitiesConnectionError,EfetchError) as msg:
print msg,"; skipping this batch"
continue
#get link uids for any links to biosample or pubmed databases so we can scrape those too
print "-getting elinks..."
try:
sdict, linkdict = get_links(batch_uid_list=batch_uid_list,sdict=sdict)
except EutilitiesConnectionError as msg: #if can't get links for srx, skip entire batch and don't write data
print msg,"; skipping this batch"
continue
#efetch for batch/es of biosamples; generate bdict dictionary of dictionaries {'bio#':{},'bio##':{}...}
biosample_batches = get_batches(uid_list=linkdict['biosample_uids']) #split biosamples into batches of 500 (if there's less than 500 there will only be one batch)
bdict = {}
for b_batch_ix,b_batch in enumerate(biosample_batches): #scrape biosample data for all the biosamples, in batches of 500
print "--processing biosample batch %s out of %s......" % (b_batch_ix+1,len(biosample_batches))
biosample_batch_uids = map(int,linkdict['biosample_uids'][b_batch[0]:b_batch[1]])
try:
bdict = get_biosample_metadata(batch_uid_list=biosample_batch_uids,bdict=bdict)
except EutilitiesConnectionError as msg:
print msg, "; skipping this biosample batch"
continue
#efetch for batch/es of pubmeds; generate pdict dictionary of dictionaries {'pub#':{},'pub#':{},...}
pubmed_batches = get_batches(uid_list=linkdict['pubmed_uids'])
pdict = {}
for p_batch_ix,p_batch in enumerate(pubmed_batches):
print "--processing pubmed batch %s out of %s......" % (p_batch_ix+1,len(pubmed_batches))
pubmed_batch_uids = map(int,linkdict['pubmed_uids'][p_batch[0]:p_batch[1]])
try:
pdict = get_pubmed_metadata(batch_uid_list=pubmed_batch_uids,pdict=pdict)
except EutilitiesConnectionError as msg:
print msg, "; skipping this pubmed batch"
continue
#merge sdict with scraped biosample/pubmed metadata
print "-merging scrapes"
sdict = merge_scrapes(sdict=sdict,bdict=bdict,pdict=pdict)
#extract and merge MIxS fields from 'sample_attributes' field in each dict in sdict (if exists)
print "-extracting and merging MIxS fields"
sdict = extract_and_merge_mixs_fields(sdict=sdict,fieldname="sample_attributes",rules_json="rules.json")
#load in rules and model for extracting metaseek_power fields
with open("CVparse_rules.json") as json_file:
manual_rules = json.load(json_file)
with open("CVparse_manualtree_rules.json") as tree_file:
tree_rules = json.load(tree_file)
investigation_model = joblib.load("investigation_type_logreg_model.pkl")
with open("model_features.json") as json_file:
model_features = json.load(json_file)
print "-extracting metaseek_power fields and changing sample_attributes to str field"
for srx in sdict.keys():
#extract metaseek_power fields for this srx
extract_metaseek_power_fields(sdict, srx, manual_rules=manual_rules, tree_rules=tree_rules, investigation_model=investigation_model, model_features=model_features)
#coerce sample attributes field to str for db insertion
if 'sample_attributes' in sdict[srx].keys():
sdict[srx]['sample_attributes'] = json.dumps(sdict[srx]['sample_attributes'])
#clean up sdict by dropping any keys whose values are nan/na (or should be treated as na); those fields become None later when rows are built
na_values = ['NA','','Missing','missing','unspecified','not available','not given','Not available',None,[],{},'[]','{}','not applicable','Not applicable','Not Applicable','N/A','n/a','not provided','Not Provided','Not provided','unidentified']
for srx in sdict.keys():
sdict[srx] = {k:sdict[srx][k] for k in sdict[srx].keys() if sdict[srx][k] not in na_values}
#add parsed metaseek lat/lon values if possible
if 'lat_lon' in sdict[srx]:
meta_latitude, meta_longitude = parseLatLon(sdict[srx]['lat_lon'])
sdict[srx]['meta_latitude'] = meta_latitude
sdict[srx]['meta_longitude'] = meta_longitude
if 'latitude' in sdict[srx] and 'longitude' in sdict[srx]:
sdict[srx]['meta_latitude'] = parseLatitude(sdict[srx]['latitude'])
sdict[srx]['meta_longitude'] = parseLongitude(sdict[srx]['longitude'])
##TODO: check whether biosample_uids exists but no biosample attribs were added; log to ScrapeError if so; same for pubmeds
print "-writing data to database..."
for srx in sdict.keys():
#add date_scraped field with the current timestamp
sdict[srx]['date_scraped'] = datetime.now()
#get row values in the order defined by metaseek_fields
row_to_write = [sdict[srx][x] if x in sdict[srx].keys() else None for x in metaseek_fields]
newDataset = Dataset(*row_to_write)
#add newdataset and commit to get new id
db.session.add(newDataset)
try:
db.session.commit()
except (exc.DataError, err.DataError) as e:
db.session.rollback()
#if one of the columns was too long, log error and skip this srx
errorToWrite = ScrapeError(uid=str(srx),error_msg="DataError: "+str(e),function="writing Dataset to db",date_scraped=datetime.now())
db.session.add(errorToWrite)
db.session.commit()
continue
if 'pubmed_uids' in sdict[srx].keys():
for pub in sdict[srx]["pubmed_uids"]:
if pub is not None:
pub = str(pub)
if pub in pdict.keys():
pub_data = [pdict[pub][x] if x in pdict[pub].keys() else None for x in pubmed_fields]
newPub = Publication(*pub_data)
newPub.datasets.append(newDataset)
db.session.add(newPub)
try:
db.session.commit()
except (exc.IntegrityError, err.IntegrityError) as e: #if pubmed already exists
db.session.rollback()
existing_pub = db.session.query(Publication).filter(Publication.pubmed_uid==pub).first()
existing_pub.datasets.append(newDataset)
db.session.commit()
if "run_ids" in sdict[srx].keys():
for run in sdict[srx]["run_ids"]:
if run is not None:
run_data = [rdict[run][x] if x in rdict[run].keys() else None for x in run_fields]
newRun = Run(*run_data)
newDataset.runs.append(newRun)
db.session.add(newRun)
#commit all those new runs
db.session.commit()
##TODO: log date and time of update, num accessions added, etc. Separate db table?
print "BATCH %s COMPLETE!" % (batch_ix+1)
| mit |
yask123/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website,
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
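# Illustrative sketch (not used by the loaders below): mirrors the shape
# arithmetic of _load_imgs so the effect of ``slice_`` and ``resize`` can be
# checked without decoding any jpeg files. The default values shown are the
# ones used by the public fetchers and give (62, 47).
def _example_loaded_shape(resize=0.5, slice_=(slice(70, 195), slice(78, 172))):
    h_slice, w_slice = slice_
    h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
    if resize is not None:
        h, w = int(resize * h), int(resize * w)
    return h, w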
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid statistical
correlation with the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
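# Illustrative usage sketch (not executed at import time; the parameter values
# are just an example, not a recommendation): restrict the dataset to people
# with many pictures and inspect the returned Bunch.
def _example_fetch_lfw_people(): # pragma: no cover
    lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    n_samples, h, w = lfw_people.images.shape
    print("%d images of size %d x %d covering %d people"
          % (n_samples, h, w, len(lfw_people.target_names)))
    return lfw_people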
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
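# Illustrative note on the index file format parsed above (names and indices
# are made up): a tab-separated line with three fields pairs two pictures of
# the same person, e.g. "Some_Name<TAB>1<TAB>4" -> target 1, while a line with
# four fields pairs pictures of two different people, e.g.
# "Name_A<TAB>1<TAB>Name_B<TAB>2" -> target 0.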
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure how to implement the
"Unrestricted" variant correctly, I have left it unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid statistical
correlation with the background
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
SANDAG/urbansim | urbansim/models/regression.py | 5 | 33858 | """
Use the ``RegressionModel`` class to fit a model using statsmodels'
OLS capability and then do subsequent prediction.
"""
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from patsy import dmatrix
from prettytable import PrettyTable
from zbox import toolz as tz
from . import util
from ..exceptions import ModelEvaluationError
from ..utils import yamlio
from ..utils.logutil import log_start_finish
logger = logging.getLogger(__name__)
def fit_model(df, filters, model_expression):
"""
Use statsmodels OLS to construct a model relation.
Parameters
----------
df : pandas.DataFrame
Data to use for fit. Should contain all the columns
referenced in the `model_expression`.
filters : list of str
Any filters to apply before doing the model fit.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
"""
df = util.apply_filter_query(df, filters)
model = smf.ols(formula=model_expression, data=df)
if len(model.exog) != len(df):
raise ModelEvaluationError(
'Estimated data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
with log_start_finish('statsmodels OLS fit', logger):
return model.fit()
def predict(df, filters, model_fit, ytransform=None):
"""
Apply model to new data to predict new dependent values.
Parameters
----------
df : pandas.DataFrame
filters : list of str
Any filters to apply before doing prediction.
model_fit : statsmodels.regression.linear_model.OLSResults
Result of model estimation.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `df`
after applying filters.
"""
df = util.apply_filter_query(df, filters)
with log_start_finish('statsmodels predict', logger):
sim_data = model_fit.predict(df)
if len(sim_data) != len(df):
raise ModelEvaluationError(
'Predicted data does not have the same length as input. '
'This suggests there are null values in one or more of '
'the input columns.')
if ytransform:
sim_data = ytransform(sim_data)
return pd.Series(sim_data, index=df.index)
def _rhs(model_expression):
"""
Get only the right-hand side of a patsy model expression.
Parameters
----------
model_expression : str
Returns
-------
rhs : str
"""
if '~' not in model_expression:
return model_expression
else:
return model_expression.split('~')[1].strip()
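# Illustrative sketch (not used elsewhere in this module; the column names are
# made up) of what _rhs returns:
def _example_rhs_usage():
    assert _rhs('np.log1p(price) ~ sqft + dist_to_cbd') == 'sqft + dist_to_cbd'
    assert _rhs('sqft + dist_to_cbd') == 'sqft + dist_to_cbd'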
class _FakeRegressionResults(object):
"""
This can be used in place of a statsmodels RegressionResults
for limited purposes when it comes to model prediction.
Intended for use when loading a model from a YAML representation;
we can do model evaluation using the stored coefficients, but can't
recreate the original statsmodels fit result.
Parameters
----------
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
fit_parameters : pandas.DataFrame
Stats results from fitting `model_expression` to data.
Should include columns 'Coefficient', 'Std. Error', and 'T-Score'.
rsquared : float
rsquared_adj : float
"""
def __init__(self, model_expression, fit_parameters, rsquared,
rsquared_adj):
self.model_expression = model_expression
self.params = fit_parameters['Coefficient']
self.bse = fit_parameters['Std. Error']
self.tvalues = fit_parameters['T-Score']
self.rsquared = rsquared
self.rsquared_adj = rsquared_adj
@property
def _rhs(self):
"""
Get only the right-hand side of `model_expression`.
"""
return _rhs(self.model_expression)
def predict(self, data):
"""
Predict new values by running data through the fit model.
Parameters
----------
data : pandas.DataFrame
Table with columns corresponding to the RHS of `model_expression`.
Returns
-------
predicted : ndarray
Array of predicted values.
"""
with log_start_finish('_FakeRegressionResults prediction', logger):
model_design = dmatrix(
self._rhs, data=data, return_type='dataframe')
return model_design.dot(self.params).values
def _model_fit_to_table(fit):
"""
Produce a pandas DataFrame of model fit results from a statsmodels
fit result object.
Parameters
----------
fit : statsmodels.regression.linear_model.RegressionResults
Returns
-------
fit_parameters : pandas.DataFrame
Will have columns 'Coefficient', 'Std. Error', and 'T-Score'.
Index will be model terms.
This frame will also have non-standard attributes
.rsquared and .rsquared_adj with the same meaning and value
as on `fit`.
"""
fit_parameters = pd.DataFrame(
{'Coefficient': fit.params,
'Std. Error': fit.bse,
'T-Score': fit.tvalues})
fit_parameters.rsquared = fit.rsquared
fit_parameters.rsquared_adj = fit.rsquared_adj
return fit_parameters
YTRANSFORM_MAPPING = {
None: None,
np.exp: 'np.exp',
'np.exp': np.exp,
np.log: 'np.log',
'np.log': np.log,
np.log1p: 'np.log1p',
'np.log1p': np.log1p,
np.expm1: 'np.expm1',
'np.expm1': np.expm1
}
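# Note (illustrative): the mapping above is intentionally symmetric so a
# ytransform callable can be serialized to YAML and recovered again, e.g.
# YTRANSFORM_MAPPING[np.log1p] == 'np.log1p' and
# YTRANSFORM_MAPPING['np.log1p'] is np.log1p.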
class RegressionModel(object):
"""
A hedonic (regression) model with the ability to store an
estimated model and predict new data based on the model.
statsmodels' OLS implementation is used.
Parameters
----------
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str or dict
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
name : optional
Optional descriptive name for this model that may be used
in output.
"""
def __init__(self, fit_filters, predict_filters, model_expression,
ytransform=None, name=None):
self.fit_filters = fit_filters
self.predict_filters = predict_filters
self.model_expression = model_expression
self.ytransform = ytransform
self.name = name or 'RegressionModel'
self.model_fit = None
self.fit_parameters = None
self.est_data = None
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a RegressionModel instance from a saved YAML configuration.
Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
RegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
model = cls(
cfg['fit_filters'],
cfg['predict_filters'],
cfg['model_expression'],
YTRANSFORM_MAPPING[cfg['ytransform']],
cfg['name'])
if 'fitted' in cfg and cfg['fitted']:
fit_parameters = pd.DataFrame(cfg['fit_parameters'])
fit_parameters.rsquared = cfg['fit_rsquared']
fit_parameters.rsquared_adj = cfg['fit_rsquared_adj']
model.model_fit = _FakeRegressionResults(
model.str_model_expression,
fit_parameters,
cfg['fit_rsquared'], cfg['fit_rsquared_adj'])
model.fit_parameters = fit_parameters
logger.debug('loaded regression model {} from YAML'.format(model.name))
return model
@property
def str_model_expression(self):
"""
Model expression as a string suitable for use with patsy/statsmodels.
"""
return util.str_model_expression(
self.model_expression, add_constant=True)
def fit(self, data, debug=False):
"""
Fit the model to data and store/return the results.
Parameters
----------
data : pandas.DataFrame
Data to use for fitting the model. Must contain all the
columns referenced by the `model_expression`.
debug : bool
If debug is set to true, this sets the attribute "est_data"
to a dataframe with the actual data used for estimation of
this model.
Returns
-------
fit : statsmodels.regression.linear_model.OLSResults
This is returned for inspection, but also stored on the
class instance for use during prediction.
"""
with log_start_finish('fitting model {}'.format(self.name), logger):
fit = fit_model(data, self.fit_filters, self.str_model_expression)
self.model_fit = fit
self.fit_parameters = _model_fit_to_table(fit)
if debug:
index = util.apply_filter_query(data, self.fit_filters).index
assert len(fit.model.exog) == len(index), (
"The estimate data is unequal in length to the original "
"dataframe, usually caused by nans")
df = pd.DataFrame(
fit.model.exog, columns=fit.model.exog_names, index=index)
df[fit.model.endog_names] = fit.model.endog
df["fittedvalues"] = fit.fittedvalues
df["residuals"] = fit.resid
self.est_data = df
return fit
@property
def fitted(self):
"""
True if the model is ready for prediction.
"""
return self.model_fit is not None
def assert_fitted(self):
"""
Raises a RuntimeError if the model is not ready for prediction.
"""
if not self.fitted:
raise RuntimeError('Model has not been fit.')
def report_fit(self):
"""
Print a report of the fit results.
"""
if not self.fitted:
print('Model not yet fit.')
return
print('R-Squared: {0:.3f}'.format(self.model_fit.rsquared))
print('Adj. R-Squared: {0:.3f}'.format(self.model_fit.rsquared_adj))
print('')
tbl = PrettyTable()
tbl.add_column('Component', self.fit_parameters.index.values)
for col in ('Coefficient', 'Std. Error', 'T-Score'):
tbl.add_column(col, self.fit_parameters[col].values)
tbl.align['Component'] = 'l'
tbl.float_format = '.3'
print(tbl)
def predict(self, data):
"""
Predict a new data set based on an estimated model.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must contain all the columns
referenced by the right-hand side of the `model_expression`.
Returns
-------
result : pandas.Series
Predicted values as a pandas Series. Will have the index of `data`
after applying filters.
"""
self.assert_fitted()
with log_start_finish('predicting model {}'.format(self.name), logger):
return predict(
data, self.predict_filters, self.model_fit, self.ytransform)
def to_dict(self):
"""
Returns a dictionary representation of a RegressionModel instance.
"""
d = {
'model_type': 'regression',
'name': self.name,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'model_expression': self.model_expression,
'ytransform': YTRANSFORM_MAPPING[self.ytransform],
'fitted': self.fitted,
'fit_parameters': None,
'fit_rsquared': None,
'fit_rsquared_adj': None
}
if self.fitted:
d['fit_parameters'] = yamlio.frame_to_yaml_safe(
self.fit_parameters)
d['fit_rsquared'] = float(self.model_fit.rsquared)
d['fit_rsquared_adj'] = float(self.model_fit.rsquared_adj)
return d
def to_yaml(self, str_or_buffer=None):
"""
Save a model representation to YAML.
Parameters
----------
str_or_buffer : str or file like, optional
By default a YAML string is returned. If a string is
given here the YAML will be written to that file.
If an object with a ``.write`` method is given the
YAML will be written to that object.
Returns
-------
j : str
YAML string if `str_or_buffer` is not given.
"""
logger.debug(
'serializing regression model {} to YAML'.format(self.name))
return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
def columns_used(self):
"""
Returns all the columns used in this model for filtering
and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.model_expression))))
@classmethod
def fit_from_cfg(cls, df, cfgname, debug=False):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
debug : boolean, optional (default False)
Whether to generate debug information on the model.
Returns
-------
RegressionModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
ret = hm.fit(df, debug=debug)
print(ret.summary())
hm.to_yaml(str_or_buffer=cfgname)
logger.debug('finish: fit from configuration {}'.format(cfgname))
return hm
@classmethod
def predict_from_cfg(cls, df, cfgname):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
hm : RegressionModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
price_or_rent = hm.predict(df)
print(price_or_rent.describe())
logger.debug('finish: predict from configuration {}'.format(cfgname))
return price_or_rent, hm
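# Illustrative usage sketch (not part of the module API; the table, filters and
# column names are made up): fit a RegressionModel on a tiny synthetic frame
# and predict back onto the same rows.
def _example_regression_model_usage(): # pragma: no cover
    df = pd.DataFrame({
        'price': [100.0, 150.0, 210.0, 270.0],
        'sqft': [800.0, 1200.0, 1700.0, 2200.0]})
    rm = RegressionModel(
        fit_filters=['sqft > 0'],
        predict_filters=['sqft > 0'],
        model_expression='np.log1p(price) ~ sqft',
        ytransform=np.expm1,
        name='example_hedonic')
    rm.fit(df)
    rm.report_fit()
    return rm.predict(df)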
class RegressionModelGroup(object):
"""
Manages a group of regression models that refer to different segments
within a single table.
Model names must match the segment names after doing a Pandas groupby.
Parameters
----------
segmentation_col
Name of the column on which to segment.
name
Optional name used to identify the model in places.
"""
def __init__(self, segmentation_col, name=None):
self.segmentation_col = segmentation_col
self.name = name if name is not None else 'RegressionModelGroup'
self.models = {}
def add_model(self, model):
"""
Add a `RegressionModel` instance.
Parameters
----------
model : `RegressionModel`
Should have a ``.name`` attribute matching one of
the groupby segments.
"""
logger.debug(
'adding model {} to group {}'.format(model.name, self.name))
self.models[model.name] = model
def add_model_from_params(self, name, fit_filters, predict_filters,
model_expression, ytransform=None):
"""
Add a model by passing arguments through to `RegressionModel`.
Parameters
----------
name : any
Must match a groupby segment name.
fit_filters : list of str
Filters applied before fitting the model.
predict_filters : list of str
Filters applied before calculating new data points.
model_expression : str
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
"""
logger.debug(
'adding model {} to group {}'.format(name, self.name))
model = RegressionModel(
fit_filters, predict_filters, model_expression, ytransform, name)
self.models[name] = model
def _iter_groups(self, data):
"""
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
"""
groups = data.groupby(self.segmentation_col)
for name in self.models:
yield name, groups.get_group(name)
def fit(self, data, debug=False):
"""
Fit each of the models in the group.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true (default false) will pass the debug parameter
to model estimation.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
with log_start_finish(
'fitting models in group {}'.format(self.name), logger):
return {name: self.models[name].fit(df, debug=debug)
for name, df in self._iter_groups(data)}
@property
def fitted(self):
"""
Whether all models in the group have been fitted.
"""
return (all(m.fitted for m in self.models.values())
if self.models else False)
def predict(self, data):
"""
Predict new data for each group in the segmentation.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must have a column with the
same name as `segmentation_col`.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
"""
with log_start_finish(
'predicting models in group {}'.format(self.name), logger):
results = [self.models[name].predict(df)
for name, df in self._iter_groups(data)]
return pd.concat(results)
def columns_used(self):
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concat(
m.columns_used() for m in self.models.values())))
class SegmentedRegressionModel(object):
"""
A regression model group that allows segments to have different
model expressions and ytransforms but all have the same filters.
Parameters
----------
segmentation_col
Name of column in the data table on which to segment. Will be used
with a pandas groupby on the data table.
fit_filters : list of str, optional
Filters applied before fitting the model.
predict_filters : list of str, optional
Filters applied before calculating new data points.
min_segment_size : int
This model will add all segments that have at least this number of
observations. A very small number of observations (e.g. 1) will
cause an error with estimation.
default_model_expr : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
default_ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
By default no transformation is applied.
name : str, optional
A name used in places to identify the model.
"""
def __init__(
self, segmentation_col, fit_filters=None, predict_filters=None,
default_model_expr=None, default_ytransform=None,
min_segment_size=0, name=None):
self.segmentation_col = segmentation_col
self._group = RegressionModelGroup(segmentation_col)
self.fit_filters = fit_filters
self.predict_filters = predict_filters
self.default_model_expr = default_model_expr
self.default_ytransform = default_ytransform
self.min_segment_size = min_segment_size
self.name = name if name is not None else 'SegmentedRegressionModel'
@classmethod
def from_yaml(cls, yaml_str=None, str_or_buffer=None):
"""
Create a SegmentedRegressionModel instance from a saved YAML
configuration. Arguments are mutually exclusive.
Parameters
----------
yaml_str : str, optional
A YAML string from which to load model.
str_or_buffer : str or file like, optional
File name or buffer from which to load YAML.
Returns
-------
SegmentedRegressionModel
"""
cfg = yamlio.yaml_to_dict(yaml_str, str_or_buffer)
default_model_expr = cfg['default_config']['model_expression']
default_ytransform = cfg['default_config']['ytransform']
seg = cls(
cfg['segmentation_col'], cfg['fit_filters'],
cfg['predict_filters'], default_model_expr,
YTRANSFORM_MAPPING[default_ytransform], cfg['min_segment_size'],
cfg['name'])
if "models" not in cfg:
cfg["models"] = {}
for name, m in cfg['models'].items():
m['model_expression'] = m.get(
'model_expression', default_model_expr)
m['ytransform'] = m.get('ytransform', default_ytransform)
m['fit_filters'] = None
m['predict_filters'] = None
reg = RegressionModel.from_yaml(yamlio.convert_to_yaml(m, None))
seg._group.add_model(reg)
logger.debug(
'loaded segmented regression model {} from yaml'.format(seg.name))
return seg
def add_segment(self, name, model_expression=None, ytransform='default'):
"""
Add a new segment with its own model expression and ytransform.
Parameters
----------
name :
Segment name. Must match a segment in the groupby of the data.
model_expression : str or dict, optional
A patsy model expression that can be used with statsmodels.
Should contain both the left- and right-hand sides.
If not given the default model will be used, which must not be
None.
ytransform : callable, optional
A function to call on the array of predicted output.
For example, if the model relation is predicting the log
of price, you might pass ``ytransform=np.exp`` so that
the results reflect actual price.
If not given the default ytransform will be used.
"""
if not model_expression:
if self.default_model_expr is None:
raise ValueError(
'No default model available, '
'you must supply a model expression.')
model_expression = self.default_model_expr
if ytransform == 'default':
ytransform = self.default_ytransform
# no fit or predict filters; those are handled at this level.
self._group.add_model_from_params(
name, None, None, model_expression, ytransform)
logger.debug('added segment {} to model {}'.format(name, self.name))
def fit(self, data, debug=False):
"""
Fit each segment. Segments that have not already been explicitly
added will be automatically added with default model and ytransform.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
debug : bool
If set to true will pass debug to the fit method of each model.
Returns
-------
fits : dict of statsmodels.regression.linear_model.OLSResults
Keys are the segment names.
"""
data = util.apply_filter_query(data, self.fit_filters)
unique = data[self.segmentation_col].unique()
value_counts = data[self.segmentation_col].value_counts()
# Remove any existing segments that may no longer have counterparts
# in the data. This can happen when loading a saved model and then
# calling this method with data that no longer has segments that
# were there the last time this was called.
gone = set(self._group.models) - set(unique)
for g in gone:
del self._group.models[g]
for x in unique:
if x not in self._group.models and \
value_counts[x] > self.min_segment_size:
self.add_segment(x)
with log_start_finish(
'fitting models in segmented model {}'.format(self.name),
logger):
return self._group.fit(data, debug=debug)
@property
def fitted(self):
"""
Whether models for all segments have been fit.
"""
return self._group.fitted
def predict(self, data):
"""
Predict new data for each group in the segmentation.
Parameters
----------
data : pandas.DataFrame
Data to use for prediction. Must have a column with the
same name as `segmentation_col`.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters.
"""
with log_start_finish(
'predicting models in segmented model {}'.format(self.name),
logger):
data = util.apply_filter_query(data, self.predict_filters)
return self._group.predict(data)
def _process_model_dict(self, d):
"""
Remove redundant items from a model's configuration dict.
Parameters
----------
d : dict
Modified in place.
Returns
-------
dict
Modified `d`.
"""
del d['model_type']
del d['fit_filters']
del d['predict_filters']
if d['model_expression'] == self.default_model_expr:
del d['model_expression']
if YTRANSFORM_MAPPING[d['ytransform']] == self.default_ytransform:
del d['ytransform']
d["name"] = yamlio.to_scalar_safe(d["name"])
return d
def to_dict(self):
"""
Returns a dict representation of this instance suitable for
conversion to YAML.
"""
return {
'model_type': 'segmented_regression',
'name': self.name,
'segmentation_col': self.segmentation_col,
'fit_filters': self.fit_filters,
'predict_filters': self.predict_filters,
'min_segment_size': self.min_segment_size,
'default_config': {
'model_expression': self.default_model_expr,
'ytransform': YTRANSFORM_MAPPING[self.default_ytransform]
},
'fitted': self.fitted,
'models': {
yamlio.to_scalar_safe(name):
self._process_model_dict(m.to_dict())
for name, m in self._group.models.items()}
}
def to_yaml(self, str_or_buffer=None):
"""
Save a model representation to YAML.
Parameters
----------
str_or_buffer : str or file like, optional
By default a YAML string is returned. If a string is
given here the YAML will be written to that file.
If an object with a ``.write`` method is given the
YAML will be written to that object.
Returns
-------
j : str
YAML string if `str_or_buffer` is not given.
"""
logger.debug(
'serializing segmented regression model {} to yaml'.format(
self.name))
return yamlio.convert_to_yaml(self.to_dict(), str_or_buffer)
def columns_used(self):
"""
Returns all the columns used across all models in the group
for filtering and in the model expression.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.fit_filters),
util.columns_in_filters(self.predict_filters),
util.columns_in_formula(self.default_model_expr),
self._group.columns_used(),
[self.segmentation_col])))
@classmethod
def fit_from_cfg(cls, df, cfgname, debug=False, min_segment_size=None):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
debug : boolean, optional (default False)
Whether to generate debug information on the model.
min_segment_size : int, optional
Set attribute on the model.
Returns
-------
hm : SegmentedRegressionModel which was used to fit
"""
logger.debug('start: fit from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
if min_segment_size:
hm.min_segment_size = min_segment_size
for k, v in hm.fit(df, debug=debug).items():
print("REGRESSION RESULTS FOR SEGMENT %s\n" % str(k))
print(v.summary())
hm.to_yaml(str_or_buffer=cfgname)
logger.debug('finish: fit from configuration {}'.format(cfgname))
return hm
@classmethod
def predict_from_cfg(cls, df, cfgname, min_segment_size=None):
"""
Parameters
----------
df : DataFrame
The dataframe which contains the columns to use for the estimation.
cfgname : string
The name of the yaml config file which describes the hedonic model.
min_segment_size : int, optional
Set attribute on the model.
Returns
-------
predicted : pandas.Series
Predicted data in a pandas Series. Will have the index of `data`
after applying filters and minus any groups that do not have
models.
hm : SegmentedRegressionModel which was used to predict
"""
logger.debug('start: predict from configuration {}'.format(cfgname))
hm = cls.from_yaml(str_or_buffer=cfgname)
if min_segment_size:
hm.min_segment_size = min_segment_size
price_or_rent = hm.predict(df)
print(price_or_rent.describe())
logger.debug('finish: predict from configuration {}'.format(cfgname))
return price_or_rent, hm
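# Illustrative YAML sketch (an assumption mirroring the keys emitted by
# SegmentedRegressionModel.to_dict; every value below is made up) of a
# configuration accepted by from_yaml / fit_from_cfg:
#
# model_type: segmented_regression
# name: example_price_model
# segmentation_col: building_type
# fit_filters:
# - price > 0
# predict_filters: null
# min_segment_size: 10
# default_config:
#     model_expression: np.log1p(price) ~ sqft
#     ytransform: np.expm1
# fitted: false
# models: {}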
| bsd-3-clause |
xuewei4d/scikit-learn | examples/mixture/plot_gmm_selection.py | 15 | 3396 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
plt.figure(figsize=(8, 6))
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title(f'Selected GMM: {best_gmm.covariance_type} model, '
f'{best_gmm.n_components} components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/core/dtypes/cast.py | 2 | 41914 | """ routines for casting """
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas._libs import tslib, lib
from pandas._libs.tslib import iNaT
from pandas.compat import string_types, text_type, PY3
from .common import (_ensure_object, is_bool, is_integer, is_float,
is_complex, is_datetimetz, is_categorical_dtype,
is_datetimelike,
is_extension_type,
is_extension_array_dtype,
is_object_dtype,
is_datetime64tz_dtype, is_datetime64_dtype,
is_datetime64_ns_dtype,
is_timedelta64_dtype, is_timedelta64_ns_dtype,
is_dtype_equal,
is_float_dtype, is_complex_dtype,
is_integer_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype, is_scalar,
is_string_dtype, _string_dtypes,
pandas_dtype,
_ensure_int8, _ensure_int16,
_ensure_int32, _ensure_int64,
_NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES)
from .dtypes import (ExtensionDtype, PandasExtensionDtype, DatetimeTZDtype,
PeriodDtype)
from .generic import (ABCDatetimeIndex, ABCPeriodIndex,
ABCSeries)
from .missing import isna, notna
from .inference import is_list_like
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values
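# Illustrative behaviour sketch (not exercised here): a plain Python list is
# first boxed into a 1d object ndarray and then, where possible, converted to
# a better dtype by lib.maybe_convert_objects, so maybe_convert_platform([1, 2, 3])
# is expected to come back as an integer ndarray rather than dtype=object.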
def is_nested_object(obj):
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not be necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj):
if any(isinstance(v, ABCSeries) for v in obj.values):
return True
return False
def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
if is_scalar(result):
return result
def trans(x):
return x
if isinstance(dtype, string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
def trans(x): # noqa
return x.round()
else:
dtype = 'object'
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if (result.dtype.itemsize <= dtype.itemsize and
np.prod(result.shape)):
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
# if we have any nulls, then we are done
if (isna(arr).any() or
not np.allclose(arr, trans(arr).astype(dtype), rtol=0)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notna(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result, rtol=0):
return new_result
except Exception:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
elif (issubclass(dtype.type, np.floating) and
not is_bool_dtype(result.dtype)):
return result.astype(dtype)
# a datetimelike
# GH12821, iNaT is casted to float
elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']:
try:
result = result.astype(dtype)
except Exception:
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize('utc')
result = result.tz_convert(dtype.tz)
except Exception:
pass
return result
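# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of what maybe_downcast_to_dtype is meant to do: floats
# that are exactly representable as integers are downcast, everything else
# is returned unchanged. Expected results are stated only as comments.
def _demo_maybe_downcast_to_dtype():
    exact = maybe_downcast_to_dtype(np.array([1.0, 2.0]), 'int64')
    lossy = maybe_downcast_to_dtype(np.array([1.5, 2.0]), 'int64')
    # exact should come back as int64 ([1, 2]); lossy stays float64 untouched
    return exact.dtype, lossy.dtype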
def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to true if the result array was upcasted
"""
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if is_datetimelike(result.dtype):
if is_scalar(other):
if isna(other):
other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
except Exception:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
        # upcast (possibly), otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (is_scalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isna(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isna(other[mask]).any():
return changeit()
try:
np.place(result, mask, other)
except Exception:
return changeit()
return result, False
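# --- Editor's illustrative sketch (not part of the original module) ---
# Rough usage of maybe_upcast_putmask: writing NaN into an int64 array cannot
# be done in place, so the helper upcasts to float64 and reports the change.
def _demo_maybe_upcast_putmask():
    arr = np.arange(3, dtype=np.int64)
    out, changed = maybe_upcast_putmask(arr, np.array([False, True, False]),
                                        np.nan)
    # expected (roughly): out ~ [0.0, nan, 2.0] as float64, changed is True
    return out, changed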
def maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
# for now: refuse to upcast datetime64
# (this is because datetime64 will not implicitly upconvert
# to object correctly as of numpy 1.6.1)
if isna(fill_value):
fill_value = iNaT
else:
if issubclass(dtype.type, np.datetime64):
try:
fill_value = tslib.Timestamp(fill_value).value
except Exception:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
fill_value = iNaT
elif issubclass(dtype.type, np.timedelta64):
try:
fill_value = tslib.Timedelta(fill_value).value
except Exception:
# as for datetimes, cannot upcast to object
fill_value = iNaT
else:
fill_value = iNaT
elif is_datetimetz(dtype):
if isna(fill_value):
fill_value = iNaT
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = iNaT
else:
dtype = np.object_
fill_value = np.nan
else:
dtype = np.object_
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif is_datetimetz(dtype):
pass
elif issubclass(np.dtype(dtype).type, string_types):
dtype = np.object_
return dtype, fill_value
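# --- Editor's illustrative sketch (not part of the original module) ---
# maybe_promote answers "what dtype do I need to hold this fill value?".
# A couple of representative calls; the expected outcomes in the comments
# follow from the logic above and are not authoritative documentation.
def _demo_maybe_promote():
    # NaN cannot live in an integer array -> promote to float64
    int_case = maybe_promote(np.dtype(np.int64), np.nan)
    # a float fill in a bool array forces object dtype
    bool_case = maybe_promote(np.dtype(np.bool_), 1.5)
    return int_case, bool_case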
def infer_dtype_from(val, pandas_dtype=False):
"""
    interpret the dtype from a scalar or array. This is a convenience
    routine to infer the dtype from a scalar or an array
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, a scalar/array belonging to a pandas extension type is
        inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, a scalar belonging to a pandas extension type is inferred
        as object
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to _infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
        # coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslib.Timestamp(val)
if val is tslib.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslib.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val
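# --- Editor's illustrative sketch (not part of the original module) ---
# infer_dtype_from_scalar maps a plain Python scalar to the numpy dtype that
# pandas would use for it; datetime-likes are additionally converted to their
# integer (i8) representation by the code above.
def _demo_infer_dtype_from_scalar():
    dtype_int, val_int = infer_dtype_from_scalar(3)      # ~ (np.int64, 3)
    dtype_flt, val_flt = infer_dtype_from_scalar(3.5)    # ~ (np.float64, 3.5)
    dtype_str, val_str = infer_dtype_from_scalar("abc")  # ~ (np.object_, 'abc')
    return dtype_int, dtype_flt, dtype_str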
def infer_dtype_from_array(arr, pandas_dtype=False):
"""
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, an array belonging to a pandas extension type is
        inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
    if pandas_dtype=False, these infer to numpy dtypes
    exactly, with the exception that mixed / object dtypes
    are not coerced by stringifying or conversion
    if pandas_dtype=True, datetime64tz-aware/categorical
    types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_type(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr)
if inferred in ['string', 'bytes', 'unicode',
'mixed', 'mixed-integer']:
return (np.object_, arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""Try to infer an object's dtype, for use in arithmetic ops
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
numpy.int64
"""
tipo = None
if hasattr(element, 'dtype'):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
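# --- Editor's illustrative sketch (not part of the original module) ---
# maybe_upcast combines maybe_promote with the actual astype: asking for a
# NaN fill on an int64 array hands back a float64-upcast version of it.
def _demo_maybe_upcast():
    values, fill = maybe_upcast(np.arange(3, dtype=np.int64),
                                fill_value=np.nan)
    # expected: values now has float64 dtype, fill is still NaN
    return values.dtype, fill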
def maybe_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: {dtype}"
.format(dtype=dtype))
def invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def maybe_convert_string_to_object(values):
"""
    Convert a string-like scalar or an array of strings to an object-dtype
    array. This avoids numpy handling the array as str dtype.
"""
if isinstance(values, string_types):
values = np.array([values], dtype=object)
elif (isinstance(values, np.ndarray) and
issubclass(values.dtype.type, (np.string_, np.unicode_))):
values = values.astype(object)
return values
def maybe_convert_scalar(values):
"""
    Convert a python scalar to the appropriate numpy dtype if possible.
    This avoids numpy directly converting according to platform preferences.
"""
if is_scalar(values):
dtype, values = infer_dtype_from_scalar(values)
try:
values = dtype(values)
except TypeError:
pass
return values
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return _ensure_int8(indexer)
elif length < _int16_max:
return _ensure_int16(indexer)
elif length < _int32_max:
return _ensure_int32(indexer)
return _ensure_int64(indexer)
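# --- Editor's illustrative sketch (not part of the original module) ---
# coerce_indexer_dtype shrinks categorical codes to the narrowest integer
# type that can address the categories; the toy categories here are made up.
def _demo_coerce_indexer_dtype():
    codes = np.array([0, 2, 1, 0], dtype=np.int64)
    small = coerce_indexer_dtype(codes, ['a', 'b', 'c'])    # -> int8 codes
    large = coerce_indexer_dtype(codes, range(100000))      # -> int32 codes
    return small.dtype, large.dtype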
def coerce_to_dtypes(result, dtypes):
"""
    given dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
def conv(r, dtype):
try:
if isna(r):
pass
elif dtype == _NS_DTYPE:
r = tslib.Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except Exception:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
elif issubclass(dtype.type, string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == 'M':
return arr.astype(dtype)
raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return tslib.ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# in py3, timedelta64[ns] are int64
if ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
# we return a float here!
if dtype.kind == 'm':
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == _TD_DTYPE:
return arr.astype(_TD_DTYPE, copy=copy)
raise TypeError("cannot astype a timedelta from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if not np.isfinite(arr).all():
raise ValueError('Cannot convert non-finite values (NA or inf) to '
'integer')
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = ("Passing in '{dtype}' dtype with no frequency is "
"deprecated and will raise in a future version. "
"Please pass in '{dtype}[ns]' instead.")
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = np.dtype(dtype.name + "[ns]")
if copy:
return arr.astype(dtype, copy=True)
return arr.view(dtype)
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = maybe_cast_to_datetime(
values, 'M8[ns]', errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.core.tools.timedeltas import to_timedelta
new_values = to_timedelta(values, errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
except Exception:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values
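# --- Editor's illustrative sketch (not part of the original module) ---
# maybe_convert_objects does a best-effort pass over an object array; numeric
# strings, for example, are coerced to a numeric array when convert_numeric
# is left at its default of True.
def _demo_maybe_convert_objects():
    raw = np.array(['1', '2', '3'], dtype=np.object_)
    converted = maybe_convert_objects(raw)
    # expected: a numeric (float64) array instead of object dtype
    return converted.dtype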
def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
from pandas import to_datetime
return to_datetime(values, errors='coerce', box=False)
elif timedelta:
from pandas import to_timedelta
return to_timedelta(values, errors='coerce', box=False)
elif numeric:
from pandas import to_numeric
return to_numeric(values, errors='coerce')
# Soft conversions
if datetime:
values = lib.maybe_convert_objects(values, convert_datetime=datetime)
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# If all NaNs, then do not-alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
except Exception:
pass
return values
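# --- Editor's illustrative sketch (not part of the original module) ---
# soft_convert_objects is the gentler cousin of maybe_convert_objects: each
# conversion is attempted only if the previous ones left the data as object.
def _demo_soft_convert_objects():
    raw = np.array(['1.5', '2.5', None], dtype=np.object_)
    out = soft_convert_objects(raw, datetime=False, timedelta=False)
    # expected: float64 values with the None preserved as NaN
    return out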
def maybe_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M':
return is_datetime64_ns_dtype(arr.dtype)
elif kind == 'm':
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(value, convert_dates=False):
"""
    we might have an array (or single object) that is datetime-like,
    and no dtype is passed; don't change the value unless we find a
datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : boolean, default False
        if True try really hard to convert dates (such as datetime.date),
        otherwise leave the inferred dtype 'date' alone
"""
if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex)):
return value
elif isinstance(value, ABCSeries):
if isinstance(value._values, ABCDatetimeIndex):
return value._values
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v,
require_iso8601=True,
errors='raise')
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
try:
from pandas._libs.tslibs import conversion
from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize(
'UTC').tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
return to_timedelta(v)._ndarray_values.reshape(shape)
except Exception:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(_ensure_object(v))
if inferred_type == 'date' and convert_dates:
value = try_datetime(v)
elif inferred_type == 'datetime':
value = try_datetime(v)
elif inferred_type == 'timedelta':
value = try_timedelta(v)
elif inferred_type == 'nat':
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but
# technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = try_datetime(v)
return value
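# --- Editor's illustrative sketch (not part of the original module) ---
# maybe_infer_to_datetimelike only converts when the object data really looks
# datetime- or timedelta-like; plain strings pass through untouched. The
# expected outcomes below follow from the code above and are not guaranteed.
def _demo_maybe_infer_to_datetimelike():
    dt_like = np.array([datetime(2018, 1, 1), datetime(2018, 1, 2)],
                       dtype=np.object_)
    not_dt = np.array(['a', 'b'], dtype=np.object_)
    # dt_like is expected to come back as a datetime64[ns]-kind array,
    # not_dt should be returned unchanged (still object dtype)
    return maybe_infer_to_datetimelike(dt_like), maybe_infer_to_datetimelike(not_dt)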
def maybe_cast_to_datetime(value, dtype, errors='raise'):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
if dtype is not None:
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# force the dtype if needed
msg = ("Passing in '{dtype}' dtype with no frequency is "
"deprecated and will raise in a future version. "
"Please pass in '{dtype}[ns]' instead.")
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name in ('datetime64', 'datetime64[ns]'):
if dtype.name == 'datetime64':
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
"dtype [{dtype}]".format(dtype=dtype))
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name in ('timedelta64', 'timedelta64[ns]'):
if dtype.name == 'timedelta64':
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
"dtype [{dtype}]".format(dtype=dtype))
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
else:
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value)
value = to_datetime(value, errors=errors)
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = (value.tz_localize('UTC')
.tz_convert(dtype.tz))
elif is_timedelta64:
value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
ints = np.asarray(value).view('i8')
return tslib.ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError('Cannot cast datetime64 to {dtype}'
.format(dtype=dtype))
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ['M', 'm']:
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
value = to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
value = maybe_infer_to_datetimelike(value)
return value
def find_common_type(types):
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype))
for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
has_ints = any(is_integer_dtype(t) for t in types)
has_floats = any(is_float_dtype(t) for t in types)
has_complex = any(is_complex_dtype(t) for t in types)
if has_ints or has_floats or has_complex:
return np.object
return np.find_common_type(types, [])
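# --- Editor's illustrative sketch (not part of the original module) ---
# find_common_type follows numpy promotion, except that bool never silently
# merges with numeric types (that combination becomes object instead).
def _demo_find_common_type():
    numeric = find_common_type([np.dtype('int64'), np.dtype('float32')])
    mixed = find_common_type([np.dtype('bool'), np.dtype('int64')])
    # expected: numeric promotes to float64, mixed falls back to object
    return numeric, mixed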
def cast_scalar_to_array(shape, value, dtype=None):
"""
create np.ndarray of specified shape and dtype, filled with values
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
"""
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values
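# --- Editor's illustrative sketch (not part of the original module) ---
# cast_scalar_to_array is essentially np.full with pandas' scalar inference:
# the dtype is taken from the value unless one is given explicitly.
def _demo_cast_scalar_to_array():
    ints = cast_scalar_to_array((2, 3), 7)         # int64-filled 2x3 block
    nans = cast_scalar_to_array((2, 3), np.nan)    # float64 block of NaNs
    return ints.dtype, nans.dtype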
def construct_1d_arraylike_from_scalar(value, length, dtype):
"""
create a np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype / np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if is_datetimetz(dtype):
from pandas import DatetimeIndex
subarr = DatetimeIndex([value] * length, dtype=dtype)
elif is_categorical_dtype(dtype):
from pandas import Categorical
subarr = Categorical([value] * length, dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
# coerce if we have nan for an integer dtype
if is_integer_dtype(dtype) and isna(value):
dtype = np.float64
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr
def construct_1d_object_array_from_listlike(values):
"""
    Transform any list-like object into a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
"""
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype='object')
result[:] = values
return result
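# --- Editor's illustrative sketch (not part of the original module) ---
# Why the empty-then-assign dance above matters: np.array() would try to
# build a 2-D array out of nested lists, while this helper keeps each inner
# list as a single element of a 1-D object array.
def _demo_construct_1d_object_array_from_listlike():
    nested = [[1, 2], [3, 4, 5]]
    arr = construct_1d_object_array_from_listlike(nested)
    # expected: shape (2,), dtype object, each element one of the lists
    return arr.shape, arr.dtype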
def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
"""
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
>>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
"""
subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind in ("U", "S"):
# GH-21083
# We can't just return np.array(subarr, dtype='str') since
# NumPy will convert the non-string objects into strings
        # Including NA values. So we have to go
# string -> object -> update NA, which requires an
# additional pass over the data.
na_values = isna(values)
subarr2 = subarr.astype(object)
subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
subarr = subarr2
return subarr
| mit |
tmhm/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
cmorgan/zipline | tests/modelling/test_factor.py | 9 | 2969 | """
Tests for Factor terms.
"""
from unittest import TestCase
from numpy import (
array,
)
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from six import iteritems
from zipline.errors import UnknownRankMethod
from zipline.modelling.factor import TestingFactor
class F(TestingFactor):
inputs = ()
window_length = 0
class FactorTestCase(TestCase):
def setUp(self):
self.f = F()
self.dates = date_range('2014-01-01', periods=5, freq='D')
self.assets = Int64Index(range(5))
self.mask = DataFrame(True, index=self.dates, columns=self.assets)
def tearDown(self):
pass
def test_bad_input(self):
with self.assertRaises(UnknownRankMethod):
self.f.rank("not a real rank method")
def test_rank(self):
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]])
expected_ranks = {
'ordinal': array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
'average': array([[1.5, 3., 4., 5., 1.5],
[2.5, 4., 5., 1., 2.5],
[3.5, 5., 1., 2., 3.5],
[4.5, 1., 2., 3., 4.5],
[1.5, 3., 4., 5., 1.5]]),
'min': array([[1., 3., 4., 5., 1.],
[2., 4., 5., 1., 2.],
[3., 5., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 3., 4., 5., 1.]]),
'max': array([[2., 3., 4., 5., 2.],
[3., 4., 5., 1., 3.],
[4., 5., 1., 2., 4.],
[5., 1., 2., 3., 5.],
[2., 3., 4., 5., 2.]]),
'dense': array([[1., 2., 3., 4., 1.],
[2., 3., 4., 1., 2.],
[3., 4., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 2., 3., 4., 1.]]),
}
# Test with the default, which should be 'ordinal'.
default_result = self.f.rank().compute_from_arrays([data], self.mask)
assert_array_equal(default_result, expected_ranks['ordinal'])
# Test with each method passed explicitly.
for method, expected_result in iteritems(expected_ranks):
result = self.f.rank(method=method).compute_from_arrays(
[data],
self.mask,
)
assert_array_equal(result, expected_ranks[method])
| apache-2.0 |
jm-begon/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
sbtlaarzc/vispy | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from "*data*".
color_lev : Color, colormap name, tuple, list or array
        The color to use when drawing the line. If a list is given, it
        must be of shape (Nlev,); if an array is given, it must be of
        shape (Nlev, ...) and provide one color per level (rgba, colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
# try _color_lev as colormap, except as everything else
try:
f_color_levs = get_colormap(self._color_lev)
except:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
| bsd-3-clause |
tetherless-world/linkipedia | dataone/annotation-cnn/pretrain_embedding.py | 1 | 2864 | import numpy as np
import gensim
from sklearn.preprocessing import OneHotEncoder
class PreTrainEmbedding():
def __init__(self, file, embedding_size):
self.embedding_size = embedding_size
self.char_alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-,;.!?:\'"/\\|_@#$%^&*~`+=<>()[]{} '
self.char_embedding_table = self.construct_char_embeddings()
self.model = gensim.models.Word2Vec.load_word2vec_format(file, binary=True)
def get_one_hot_encoding(self, target_classes):
enc = OneHotEncoder()
return enc.fit_transform(np.array(target_classes).reshape(-1,1)).toarray()
def construct_char_embeddings(self):
ascii_list = [ord(c) for c in self.char_alphabet]
ascii_list.sort()
encodings = self.get_one_hot_encoding(ascii_list)
result = dict()
for i, enc in enumerate(encodings):
result[ascii_list[i]] = enc
return result
def get_embedding(self, word):
try:
result = self.model[word]
return result
except KeyError:
#print 'Can not get embedding for ', word
return None
def get_glove_embedding(self, vectors_file='glove.6B.100d.txt'):
with open(vectors_file, 'r') as f:
vectors = {}
for line in f:
vals = line.rstrip().split(' ')
vectors[vals[0]] = [float(x) for x in vals[1:]]
vocab_size = len(vectors)
words = vectors.keys()
vocab = {w: idx for idx, w in enumerate(words)}
ivocab = {idx: w for idx, w in enumerate(words)}
vector_dim = len(vectors[ivocab[0]])
W = np.zeros((vocab_size, vector_dim))
for word, v in vectors.items():
if word == '<unk>':
continue
W[vocab[word], :] = v
return vocab, W
def create_char_level_embeddings(self, sentence, max_doc_length):
sent_embed = np.zeros((max_doc_length, self.embedding_size))
idx = 0
for c in sentence:
try:
sent_embed[idx, :] = self.char_embedding_table[ord(c)]
            except KeyError:
                # skip characters that are not in the embedding alphabet
                continue
idx = idx + 1
if idx == max_doc_length:
break
return sent_embed
def create_word_level_embeddings(self, sentence, max_doc_length):
sent_embed = np.zeros((max_doc_length, self.embedding_size))
if sentence is None:
return sent_embed
idx = 0
for word in sentence.split():
embedding = self.get_embedding(word)
if embedding is not None:
sent_embed[idx, :] = embedding
idx = idx + 1
if idx == max_doc_length:
break
return sent_embed
| gpl-3.0 |
vorasagar7/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_replicas_import.py | 19 | 5474 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
# benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
# print benchmark_df1['shard_replicas']
# print benchmark_df1
# print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):
"""formats and creates a line chart
@param1: import_seconds_kilo Array with import_seconds from kilo
@type: numpy array
@param2: replicas_kilo Array with replicas from kilo
@type: numpy array
@param3: import_seconds_chameleon Array with import_seconds from chameleon
@type: numpy array
@param4: replicas_chameleon Array with replicas from chameleon
@type: numpy array
"""
fig = plt.figure()
#plt.title('Average Mongoimport Runtime by Shard Replication Factor')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
# Make the chart
plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')
# http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/replica_import.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
import_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
import_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
import_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream)
| apache-2.0 |
eusi/MissionPlanerHM | Lib/site-packages/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
# QR decomposition
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
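        # A call with lwork=-1 is a LAPACK workspace-size query; the optimal
        # lwork is returned in work[0] and used for the real computation below.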
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
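        # dgeev returns each complex-conjugate eigenvector pair as two
        # consecutive real vectors (real part, imaginary part); recombine
        # them into complex eigenvectors here.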
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
    ((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
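    # Invert only the singular values above the relative cutoff; smaller ones
    # are zeroed, which is what defines the Moore-Penrose pseudo-inverse here.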
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 2.0.0.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
See Also
--------
det
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
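    # The sign of the determinant is the parity of the row swaps recorded in
    # the LU pivots times the signs of U's diagonal; the magnitude is
    # accumulated as sum(log|diag(U)|) to avoid overflow/underflow.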
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
        If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
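        # Size the real workspace by running a workspace query against the
        # real-valued dgelsd on dummy arrays of the same shape, then redo the
        # complex workspace with those sizes for the actual zgelsd call.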
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
| gpl-3.0 |
lpantano/bcbio-nextgen | bcbio/rnaseq/count.py | 5 | 2271 | """
count number of reads mapping to features of transcripts
"""
import os
import sys
import itertools
import pandas as pd
import gffutils
from bcbio.utils import file_exists
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio import bam
import bcbio.pipeline.datadict as dd
def combine_count_files(files, out_file=None, ext=".fpkm"):
"""
combine a set of count files into a single combined file
"""
assert all([file_exists(x) for x in files]), \
"Some count files in %s do not exist." % files
for f in files:
assert file_exists(f), "%s does not exist or is empty." % f
col_names = [os.path.basename(x.replace(ext, "")) for x in files]
if not out_file:
out_dir = os.path.join(os.path.dirname(files[0]))
out_file = os.path.join(out_dir, "combined.counts")
if file_exists(out_file):
return out_file
for i, f in enumerate(files):
if i == 0:
df = pd.io.parsers.read_table(f, sep="\t", index_col=0, header=None,
names=[col_names[0]])
else:
df = df.join(pd.io.parsers.read_table(f, sep="\t", index_col=0,
header=None,
names=[col_names[i]]))
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
def annotate_combined_count_file(count_file, gtf_file, out_file=None):
dbfn = gtf_file + ".db"
if not file_exists(dbfn):
return None
if not gffutils:
return None
db = gffutils.FeatureDB(dbfn, keep_order=True)
if not out_file:
out_dir = os.path.dirname(count_file)
out_file = os.path.join(out_dir, "annotated_combined.counts")
# if the genes don't have a gene_id or gene_name set, bail out
try:
symbol_lookup = {f['gene_id'][0]: f['gene_name'][0] for f in
db.features_of_type('exon')}
except KeyError:
return None
df = pd.io.parsers.read_table(count_file, sep="\t", index_col=0, header=0)
df['symbol'] = df.apply(lambda x: symbol_lookup.get(x.name, ""), axis=1)
df.to_csv(out_file, sep="\t", index_label="id")
return out_file
| mit |
timmie/cartopy | lib/cartopy/tests/mpl/test_caching.py | 2 | 8842 | # (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import gc
import six
import unittest
try:
from owslib.wmts import WebMapTileService
except ImportError as e:
WebMapTileService = None
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
from matplotlib.path import Path
import cartopy.crs as ccrs
from cartopy.mpl.feature_artist import FeatureArtist
from cartopy.io.ogc_clients import WMTSRasterSource, _OWSLIB_AVAILABLE
import cartopy.io.shapereader
import cartopy.mpl.geoaxes as cgeoaxes
import cartopy.mpl.patch
from cartopy.examples.waves import sample_data
class CallCounter(object):
"""
Exposes a context manager which can count the number of calls to a specific
function. (useful for cache checking!)
Internally, the target function is replaced with a new one created
by this context manager which then increments ``self.count`` every
time it is called.
Example usage::
show_counter = CallCounter(plt, 'show')
with show_counter:
plt.show()
plt.show()
plt.show()
print show_counter.count # <--- outputs 3
"""
def __init__(self, parent, function_name):
self.count = 0
self.parent = parent
self.function_name = function_name
self.orig_fn = getattr(parent, function_name)
def __enter__(self):
def replacement_fn(*args, **kwargs):
self.count += 1
return self.orig_fn(*args, **kwargs)
setattr(self.parent, self.function_name, replacement_fn)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
setattr(self.parent, self.function_name, self.orig_fn)
def test_coastline_loading_cache():
# a5caae040ee11e72a62a53100fe5edc355304419 added coastline caching.
# This test ensures it is working.
# Create coastlines to ensure they are cached.
ax1 = plt.subplot(2, 1, 1, projection=ccrs.PlateCarree())
ax1.coastlines()
plt.draw()
# Create another instance of the coastlines and count
# the number of times shapereader.Reader is created.
counter = CallCounter(cartopy.io.shapereader.Reader, '__init__')
with counter:
ax2 = plt.subplot(2, 1, 1, projection=ccrs.Robinson())
ax2.coastlines()
plt.draw()
assert counter.count == 0, ('The shapereader Reader class was created {} '
'times, indicating that the caching is not '
'working.'.format(counter.count))
plt.close()
def test_shapefile_transform_cache():
# a5caae040ee11e72a62a53100fe5edc355304419 added shapefile mpl
# geometry caching based on geometry object id. This test ensures
# it is working.
coastline_path = cartopy.io.shapereader.natural_earth(resolution="50m",
category='physical',
name='coastline')
geoms = cartopy.io.shapereader.Reader(coastline_path).geometries()
# Use the first 10 of them.
geoms = tuple(geoms)[:10]
n_geom = len(geoms)
ax = plt.axes(projection=ccrs.Robinson())
# Empty the cache.
FeatureArtist._geom_key_to_geometry_cache.clear()
FeatureArtist._geom_key_to_path_cache.clear()
assert len(FeatureArtist._geom_key_to_geometry_cache) == 0
assert len(FeatureArtist._geom_key_to_path_cache) == 0
counter = CallCounter(ax.projection, 'project_geometry')
with counter:
ax.add_geometries(geoms, ccrs.PlateCarree())
ax.add_geometries(geoms, ccrs.PlateCarree())
ax.add_geometries(geoms[:], ccrs.PlateCarree())
ax.figure.canvas.draw()
# Without caching the count would have been
# n_calls * n_geom, but should now be just n_geom.
assert counter.count == n_geom, ('The given geometry was transformed too '
'many times (expected: {}; got {}) - the'
' caching is not working.'
''.format(n_geom, counter.count))
# Check the cache has an entry for each geometry.
assert len(FeatureArtist._geom_key_to_geometry_cache) == n_geom
assert len(FeatureArtist._geom_key_to_path_cache) == n_geom
# Check that the cache is empty again once we've dropped all references
# to the source paths.
plt.clf()
del geoms
gc.collect()
assert len(FeatureArtist._geom_key_to_geometry_cache) == 0
assert len(FeatureArtist._geom_key_to_path_cache) == 0
plt.close()
def test_contourf_transform_path_counting():
ax = plt.axes(projection=ccrs.Robinson())
ax.figure.canvas.draw()
# Capture the size of the cache before our test.
gc.collect()
initial_cache_size = len(cgeoaxes._PATH_TRANSFORM_CACHE)
path_to_geos_counter = CallCounter(cartopy.mpl.patch, 'path_to_geos')
with path_to_geos_counter:
x, y, z = sample_data((30, 60))
cs = plt.contourf(x, y, z, 5, transform=ccrs.PlateCarree())
n_geom = sum([len(c.get_paths()) for c in cs.collections])
del cs
if not six.PY3:
del c
ax.figure.canvas.draw()
# Before the performance enhancement, the count would have been 2 * n_geom,
# but should now be just n_geom.
msg = ('The given geometry was transformed too many times (expected: %s; '
'got %s) - the caching is not working.'
'' % (n_geom, path_to_geos_counter.count))
assert path_to_geos_counter.count == n_geom, msg
# Check the cache has an entry for each geometry.
assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == initial_cache_size + n_geom
# Check that the cache is empty again once we've dropped all references
# to the source paths.
plt.clf()
gc.collect()
assert len(cgeoaxes._PATH_TRANSFORM_CACHE) == initial_cache_size
plt.close()
@unittest.skipIf(not _OWSLIB_AVAILABLE, 'OWSLib is unavailable.')
def test_wmts_tile_caching():
image_cache = WMTSRasterSource._shared_image_cache
image_cache.clear()
assert len(image_cache) == 0
url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
wmts = WebMapTileService(url)
layer_name = 'MODIS_Terra_CorrectedReflectance_TrueColor'
source = WMTSRasterSource(wmts, layer_name)
gettile_counter = CallCounter(wmts, 'gettile')
crs = ccrs.PlateCarree()
extent = (-180, 180, -90, 90)
resolution = (20, 10)
with gettile_counter:
source.fetch_raster(crs, extent, resolution)
n_tiles = 2
assert gettile_counter.count == n_tiles, ('Too many tile requests - '
'expected {}, got {}.'.format(
n_tiles,
gettile_counter.count)
)
gc.collect()
assert len(image_cache) == 1
assert len(image_cache[wmts]) == 1
tiles_key = (layer_name, '0')
assert len(image_cache[wmts][tiles_key]) == n_tiles
# Second time around we shouldn't request any more tiles so the
# call count will stay the same.
with gettile_counter:
source.fetch_raster(crs, extent, resolution)
assert gettile_counter.count == n_tiles, ('Too many tile requests - '
'expected {}, got {}.'.format(
n_tiles,
gettile_counter.count)
)
gc.collect()
assert len(image_cache) == 1
assert len(image_cache[wmts]) == 1
tiles_key = (layer_name, '0')
assert len(image_cache[wmts][tiles_key]) == n_tiles
# Once there are no live references the weak-ref cache should clear.
del source, wmts, gettile_counter
gc.collect()
assert len(image_cache) == 0
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-3.0 |
JonnoFTW/htm-models-adelaide | engine/evaluate.py | 1 | 5391 | from __future__ import print_function
from index import create_upstream_model
from metrics import geh, rmse, mape
from collections import OrderedDict, defaultdict
import csv
import tabulate
from datetime import datetime, timedelta
from pluck import pluck
import numpy as np
import pyprind
steps = [1]
eps = 1e-6
def run_data(fname, limit=None, sensors=None):
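"""Load 5-minute vehicle counts from the CSV `fname`, feed rows up to
`limit` (a datetime) through the upstream HTM model and return the
per-step predictions, the loaded rows, the model, the iterator over the
remaining rows, the number of rows trained on and the total row count.
If `sensors` is given, only those detector columns are summed."""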
data = []
# load up the data
print("Loading Data")
data_rows = 0
max_input = 0
last_row = np.inf
with open(fname, 'rb') as infile:
reader = csv.DictReader(infile)
fields = reader.fieldnames
for row in reader:
dt = datetime.strptime(row['timestamp'], "%Y-%m-%d %H:%M:%S")
if type(limit) is datetime and limit > dt:
last_row = data_rows
if sensors is None:
counts = [int(row[x]) for x in fields[1:]]
else:
counts = [int(row[x]) for x in fields[1:] if int(x) in sensors]
if any(map(lambda x: x > 300, counts)):
continue
downstream = max(1, sum(counts))
data.append({
'timestamp': dt,
'downstream': downstream
})
if downstream < 300:
max_input = max(max_input, downstream)
data_rows += 1
# if data_rows > 100:
# break
print("Data length", data_rows, "max_input", max_input)
# process the data
model = create_upstream_model(max_input, steps)
step_predictions = defaultdict(list)
row_count = 0
progress = pyprind.ProgBar(min(last_row, data_rows), width=50, stream=1)
it = iter(data)
for row in it:
progress.update()
result = model.run(row)
for i in steps:
step_predictions[i].append(result.inferences["multiStepBestPredictions"][i])
if type(limit) is datetime and row['timestamp'] >= limit:
break
row_count += 1
print ("Trained on", row_count, "rows")
return step_predictions, data, model, it, row_count, len(data)
if __name__ == "__main__":
import sys
for i in sys.argv[1:]:
print("Running ", i)
fname = i.split('/')[-1]
predictions, data, model, it, row_count, data_len = run_data(i, limit=datetime(2013, 4, 23))
model.save('/scratch/model_store/model_3002_1_step')
# turn the data into numpy arrays
split_idx = int(len(data) * 0.4)
flow_values = np.array(pluck(data[split_idx:], 'downstream'))
print()
# print (predictions)
predictions = {
k: np.array(v[split_idx:]) for k, v in predictions.items()
}
print()
#
# table = []
# print(' & '.join(['step', 'geh', 'mape', 'rmse'])+' \\\\')
# for step in steps:
# # true values
# stepped_vals = flow_values[step:len(predictions[step])]
# # predicted values
# pred_vals = predictions[step][:-step] + eps
# table.append(OrderedDict([
# ('steps', step),
# ('geh', geh(stepped_vals, pred_vals)),
# ('mape', mape(stepped_vals, pred_vals)),
# ('rmse', rmse(stepped_vals, pred_vals))
# ]))
# print(tabulate.tabulate(table, 'keys', 'latex'))
print("Loading matplotlib")
font = {'size': 30}
import matplotlib
matplotlib.rc('font', **font)
import matplotlib.pyplot as plt
true_y = []
true_x = []
pred_y = []
pred_x = []
print("Predicting data rows: {}".format(data_len - row_count))
progress = pyprind.ProgBar(data_len - row_count, width=50, stream=1)
for row in it:
progress.update()
preds = model.run(row)
if row['timestamp'] > datetime(2013, 6, 15):
break
true_x.append(row['timestamp'])
true_y.append(row['downstream'])
pred_y.append(preds.inferences["multiStepBestPredictions"][1])
pred_x.append(row['timestamp'] + timedelta(minutes=5))
np.savez("pred_data/{}-htm-pred-data".format(fname), true_x=true_x, true_y=true_y, pred_x=pred_x, pred_y=pred_y)
np_tx = np.array(true_x)[1:]
np_ty = np.array(true_y)[1:]
np_py = np.array(pred_y)[:-1]
print()
print("GEH: ", geh(np_ty, np_py))
print("MAPE: ", mape(np_ty, np_py))
print("RMSE: ", rmse(np_ty, np_py))
print()
print("True x:", len(true_x))
print("True y:", len(true_x))
print("Pred y:", len(true_x))
plt.plot(true_x, true_y, 'b-', label='Readings')
plt.plot(pred_x, pred_y, 'r-', label='Predictions')
plt.legend(prop={'size': 23})
plt.grid(b=True, which='major', color='black', linestyle='-')
plt.grid(b=True, which='minor', color='black', linestyle='dotted')
df = "%A %d %B, %Y"
plt.title("3002: Traffic Flow from {} to {}".format(true_x[0].strftime(df), true_x[-1].strftime(df)), y=1.03)
plt.legend()
plt.ylabel("Vehicles/ 5 min")
plt.xlabel("Time")
ax = plt.gca()  # style the ticks of the axes we just plotted, not a new empty figure
for tick in ax.xaxis.get_minor_ticks():
tick.label.set_fontsize(26)
tick.label.set_rotation('vertical')
plt.show()
| agpl-3.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/core/datetools.py | 6 | 1729 | """A collection of random tools for dealing with dates in Python"""
from pandas.tseries.tools import *
from pandas.tseries.offsets import *
from pandas.tseries.frequencies import *
day = DateOffset()
bday = BDay()
businessDay = bday
try:
cday = CDay()
customBusinessDay = CustomBusinessDay()
customBusinessMonthEnd = CBMonthEnd()
customBusinessMonthBegin = CBMonthBegin()
except NotImplementedError:
cday = None
customBusinessDay = None
customBusinessMonthEnd = None
customBusinessMonthBegin = None
monthEnd = MonthEnd()
yearEnd = YearEnd()
yearBegin = YearBegin()
bmonthEnd = BMonthEnd()
bmonthBegin = BMonthBegin()
cbmonthEnd = customBusinessMonthEnd
cbmonthBegin = customBusinessMonthBegin
bquarterEnd = BQuarterEnd()
quarterEnd = QuarterEnd()
byearEnd = BYearEnd()
week = Week()
# Functions/offsets to roll dates forward
thisMonthEnd = MonthEnd(0)
thisBMonthEnd = BMonthEnd(0)
thisYearEnd = YearEnd(0)
thisYearBegin = YearBegin(0)
thisBQuarterEnd = BQuarterEnd(0)
thisQuarterEnd = QuarterEnd(0)
# Functions to check where a date lies
isBusinessDay = BDay().onOffset
isMonthEnd = MonthEnd().onOffset
isBMonthEnd = BMonthEnd().onOffset
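# Illustrative usage (a sketch; the dates below are arbitrary examples):
#   >>> from datetime import datetime
#   >>> datetime(2013, 1, 5) + bday                 # Saturday -> Monday 2013-01-07
#   >>> monthEnd.rollforward(datetime(2013, 1, 5))  # -> 2013-01-31
#   >>> isBusinessDay(datetime(2013, 1, 6))         # False (a Sunday)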
def _resolve_offset(freq, kwds):
if 'timeRule' in kwds or 'offset' in kwds:
offset = kwds.get('offset', None)
offset = kwds.get('timeRule', offset)
if isinstance(offset, compat.string_types):
offset = getOffset(offset)
warn = True
else:
offset = freq
warn = False
if warn:
import warnings
warnings.warn("'timeRule' and 'offset' parameters are deprecated,"
" please use 'freq' instead",
FutureWarning)
return offset
| mit |
robogen/CMS-Mining | RunScripts/es_evntVnet.py | 1 | 13344 | from elasticsearch import Elasticsearch
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import numpy as np
import datetime as dt
import math
import json
import pprint
with open("config", "r+") as txt:
contents = list(map(str.rstrip, txt))
esCon = Elasticsearch([{
'host': contents[4], 'port': contents[5]
}], timeout=30)
pp = pprint.PrettyPrinter(indent=4)
def utcDate(time):
return dt.datetime.fromtimestamp(time, dt.timezone.utc)
def utcStamp(time):
return (dt.datetime.strptime(time,'%Y-%m-%dT%X')).replace(tzinfo=dt.timezone.utc).timestamp()
scrollPreserve="3m"
startDate = "2016-07-17T00:00:00"
endDate = "2016-07-25T00:00:00"
utcStart = utcStamp(startDate)
utcEnd = utcStamp(endDate)
oneDay = np.multiply(24,np.multiply(60,60))
def esConAgg(field):
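"""Return the distinct values of `field` in the net-health index,
gathered with a terms aggregation."""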
queryBody={"aggs": {
"dev": {
"terms": {"field":field}
}
}
}
scannerCon = esCon.search(index="net-health",
body=queryBody,
search_type="query_then_fetch",
scroll=scrollPreserve)
scrollIdCon = scannerCon['aggregations']['dev']
conTotalRec = scrollIdCon['buckets']
arrRet = np.array([])
if conTotalRec == 0:
return None
else:
for hit in conTotalRec:
arrRet = np.append(arrRet, hit['key'])
return arrRet
def esConQuery(src, dest):
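"""Fetch net-health documents for the given src/dest pair within one day
of `utcStart` and return a dict of Nx3 arrays keyed by metric name, each
row holding [metric value, KEvents, EventRate]."""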
queryBody={"query" :
{"bool": {
"must": [
{"match" :
{"src" : src}
},
{"match" :
{"dest" : dest}
},
{"range" : {
"beginDate" : {
"gt" : int(utcStart),
"lt" : int((utcStart + oneDay))
}
}
}
]
}
}, "sort": {"beginDate": {"order": "desc"}}
}
scannerCon = esCon.search(index="net-health",
body=queryBody,
search_type="scan",
scroll=scrollPreserve)
scrollIdCon = scannerCon['_scroll_id']
conTotalRec = scannerCon["hits"]["total"]
arrRet = {}
arrRet['srcLatency'] = np.array([])
arrRet['destLatency'] = np.array([])
arrRet['srcPacket'] = np.array([])
arrRet['destPacket'] = np.array([])
arrRet['srcThroughput'] = np.array([])
arrRet['destThroughput'] = np.array([])
if conTotalRec == 0:
return None
else:
while conTotalRec > 0:
responseCon = esCon.scroll(scroll_id=scrollIdCon,
scroll=scrollPreserve)
for hit in responseCon["hits"]["hits"]:
    source = hit["_source"]
    # For every metric present in this document, append a
    # [metric value, KEvents, EventRate] row to the matching array.
    for field in ('srcThroughput', 'destThroughput',
                  'srcPacket', 'destPacket',
                  'srcLatency', 'destLatency'):
        if field in source:
            newRow = np.array([source[field],
                               source["KEvents"],
                               source["EventRate"]])
            if not arrRet[field].size > 0:
                arrRet[field] = np.reshape(newRow, (1, 3))
            else:
                arrRet[field] = np.vstack((arrRet[field], newRow))
conTotalRec -= len(responseCon['hits']['hits'])
return arrRet
#print(esConAgg("src"))
#print(esConAgg("dest"))
def main(utcStart):
with PdfPages('CMS_RateVSNum.pdf') as pc:
d = pc.infodict()
d['Title'] = 'CMS Scatter Plots'
d['Author'] = u'Jerrod T. Dixon\xe4nen'
d['Subject'] = 'Plot of network affects on grid jobs'
d['Keywords'] = 'PdfPages matplotlib CMS grid'
d['CreationDate'] = dt.datetime.today()
d['ModDate'] = dt.datetime.today()
#qResults = esConQuery('t1_de_kit','T1_ES_PIC')
while utcStart <= utcEnd:
srcSites = esConAgg("src")
destSites = esConAgg("dest")
workDate = utcDate(utcStart)
for ping in srcSites:
for pong in destSites:
qResults = esConQuery(ping, pong)
if not type(qResults) == type(None):
srcLatency = qResults['srcLatency']
destLatency = qResults['destLatency']
srcPacket = qResults['srcPacket']
destPacket = qResults['destPacket']
srcThrough = qResults['srcThroughput']
destThrough = qResults['destThroughput']
if srcThrough.size > 0:
figsT, axsT = plt.subplots(2, sharex=True)
axsT[0].scatter(srcThrough[:,0],srcThrough[:,1])
axsT[1].scatter(srcThrough[:,0],srcThrough[:,2])
axsT[0].set_ylabel("KEvents")
axsT[1].set_ylabel("EventRate")
axsT[1].set_xlabel("Source Throughput")
axsT[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
pc.savefig(figsT)
plt.close(figsT)
if destThrough.size > 0:
figdT, axdT = plt.subplots(2, sharex=True)
axdT[0].scatter(destThrough[:,0],destThrough[:,1])
axdT[1].scatter(destThrough[:,0],destThrough[:,2])
axdT[0].set_ylabel("KEvents")
axdT[1].set_ylabel("EventRate")
axdT[1].set_xlabel("Destination Throughput")
axdT[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
pc.savefig(figdT)
plt.close(figdT)
if srcPacket.size > 0:
figsP, axsP = plt.subplots(2, sharex=True)
axsP[0].scatter(srcPacket[:,0],srcPacket[:,1])
axsP[1].scatter(srcPacket[:,0],srcPacket[:,2])
axsP[0].set_ylabel("KEvents")
axsP[1].set_ylabel("EventRate")
axsP[1].set_xlabel("Source Packet Loss")
axsP[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
pc.savefig(figsP)
plt.close(figsP)
if destPacket.size > 0:
figdP, axdP = plt.subplots(2, sharex=True)
axdP[0].scatter(destPacket[:,0],destPacket[:,1])
axdP[1].scatter(destPacket[:,0],destPacket[:,2])
axdP[0].set_ylabel("KEvents")
axdP[1].set_ylabel("EventRate")
axdP[1].set_xlabel("Destination Packet Loss")
axdP[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
pc.savefig(figdP)
plt.close(figdP)
if srcLatency.size > 0:
figL, axL = plt.subplots(2, sharex=True)
axL[0].scatter(srcLatency[:,0],srcLatency[:,1])
axL[1].scatter(srcLatency[:,0],srcLatency[:,2])
axL[0].set_ylabel("KEvents")
axL[1].set_ylabel("EventRate")
axL[1].set_xlabel("Source Latency")
axL[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
pc.savefig(figL)
plt.close(figL)
if destLatency.size > 0:
figP, axP = plt.subplots(2, sharex=True)
axP[1].scatter(destLatency[:,0],destLatency[:,2])
axP[0].scatter(destLatency[:,0],destLatency[:,1])
axP[0].set_ylabel("KEvents")
axP[1].set_ylabel("EventRate")
axP[1].set_xlabel("Destination Latency")
axP[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
pc.savefig(figP)
plt.close(figP)
utcStart = utcStart + oneDay
#axC[1].scatter(destRes[:,0],destRes[:,1])
#axC[1].set_ylabel("CpuEff")
# Run Main code
main(utcStart)
| mit |
dsm054/pandas | pandas/tests/series/indexing/test_alter_index.py | 1 | 17845 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import numpy as np
from numpy import nan
import pytest
import pandas.compat as compat
from pandas.compat import lrange, range
import pandas as pd
from pandas import Categorical, Series, date_range, isna
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize(
'first_slice,second_slice', [
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]]
])
@pytest.mark.parametrize('fill', [None, -1])
def test_align(test_data, first_slice, second_slice, join_type, fill):
a = test_data.ts[slice(*first_slice)]
b = test_data.ts[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, fill_value=fill)
join_index = a.index.join(b.index, how=join_type)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
@pytest.mark.parametrize(
'first_slice,second_slice', [
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]]
])
@pytest.mark.parametrize('method', ['pad', 'bfill'])
@pytest.mark.parametrize('limit', [None, 1])
def test_align_fill_method(test_data,
first_slice, second_slice,
join_type, method, limit):
a = test_data.ts[slice(*first_slice)]
b = test_data.ts[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, method=method, limit=limit)
join_index = a.index.join(b.index, how=join_type)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
def test_align_nocopy(test_data):
b = test_data.ts[:5].copy()
# do copy
a = test_data.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = test_data.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = test_data.ts.copy()
b = test_data.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = test_data.ts.copy()
b = test_data.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(test_data):
a, b = test_data.ts.align(test_data.ts, copy=False)
assert a.index is test_data.ts.index
assert b.index is test_data.ts.index
a, b = test_data.ts.align(test_data.ts, copy=True)
assert a.index is not test_data.ts.index
assert b.index is not test_data.ts.index
def test_align_multiindex():
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(test_data):
identity = test_data.series.reindex(test_data.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(test_data.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(test_data.series.index)
assert identity.index.identical(test_data.series.index)
subIndex = test_data.series.index[10:20]
subSeries = test_data.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
assert val == test_data.series[idx]
subIndex2 = test_data.ts.index[10:20]
subTS = test_data.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
assert val == test_data.ts[idx]
stuffSeries = test_data.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = test_data.ts.index[::2]
subNonContig = test_data.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
assert val == test_data.ts[idx]
# return a copy the same index here
result = test_data.ts.reindex()
assert not (result is test_data.ts)
def test_reindex_nan():
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat():
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isna()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes():
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(test_data):
# (don't forget to fix this) I think it's fixed
test_data.empty.reindex(test_data.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = test_data.empty.reindex(test_data.ts.index, method='pad')
# pass non-Index
reindexed = test_data.ts.reindex(list(test_data.ts.index))
assert_series_equal(test_data.ts, reindexed)
# bad fill method
ts = test_data.ts[::2]
pytest.raises(Exception, ts.reindex, test_data.ts.index, method='foo')
def test_reindex_pad():
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest():
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest',
tolerance=[1, 2, 3, 4])
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = Series([0, np.nan, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill():
pass
def test_reindex_int(test_data):
ts = test_data.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(test_data.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(test_data):
# A series other than float, int, string, or object
ts = test_data.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(test_data.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(test_data):
# fail
ts = test_data.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(test_data.ts.index, method='pad')
assert isna(filled_bool[:5]).all()
def test_reindex_categorical():
index = date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_reindex_like(test_data):
other = test_data.ts[::2]
assert_series_equal(test_data.ts.reindex(other.index),
test_data.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value():
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_reindex_datetimeindexes_tz_naive_and_aware():
# GH 8306
idx = date_range('20131101', tz='America/Chicago', periods=7)
newidx = date_range('20131103', periods=10, freq='H')
s = Series(range(7), index=idx)
with pytest.raises(TypeError):
s.reindex(newidx, method='ffill')
def test_reindex_empty_series_tz_dtype():
# GH 20869
result = Series(dtype='datetime64[ns, UTC]').reindex([0, 1])
expected = Series([pd.NaT] * 2, dtype='datetime64[ns, UTC]')
tm.assert_equal(result, expected)
def test_rename():
# GH 17407
s = Series(range(1, 6), index=pd.Index(range(2, 7), name='IntIndex'))
result = s.rename(str)
expected = s.rename(lambda i: str(i))
assert_series_equal(result, expected)
assert result.name == expected.name
@pytest.mark.parametrize(
'data, index, drop_labels,'
' axis, expected_data, expected_index',
[
# Unique Index
([1, 2], ['one', 'two'], ['two'],
0, [1], ['one']),
([1, 2], ['one', 'two'], ['two'],
'rows', [1], ['one']),
([1, 1, 2], ['one', 'two', 'one'], ['two'],
0, [1, 2], ['one', 'one']),
# GH 5248 Non-Unique Index
([1, 1, 2], ['one', 'two', 'one'], 'two',
0, [1, 2], ['one', 'one']),
([1, 1, 2], ['one', 'two', 'one'], ['one'],
0, [1], ['two']),
([1, 1, 2], ['one', 'two', 'one'], 'one',
0, [1], ['two'])])
def test_drop_unique_and_non_unique_index(data, index, axis, drop_labels,
expected_data, expected_index):
s = Series(data=data, index=index)
result = s.drop(drop_labels, axis=axis)
expected = Series(data=expected_data, index=expected_index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'data, index, drop_labels,'
' axis, error_type, error_desc',
[
# single string/tuple-like
(range(3), list('abc'), 'bc',
0, KeyError, 'not found in axis'),
# bad axis
(range(3), list('abc'), ('a',),
0, KeyError, 'not found in axis'),
(range(3), list('abc'), 'one',
'columns', ValueError, 'No axis named columns')])
def test_drop_exception_raised(data, index, drop_labels,
axis, error_type, error_desc):
with pytest.raises(error_type, match=error_desc):
Series(data, index=index).drop(drop_labels, axis=axis)
def test_drop_with_ignore_errors():
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
tm.assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
tm.assert_series_equal(result, expected)
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 3]])
@pytest.mark.parametrize('drop_labels', [[], [1], [3]])
def test_drop_empty_list(index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
series = pd.Series(index=index).drop(drop_labels)
tm.assert_series_equal(series, pd.Series(index=expected_index))
@pytest.mark.parametrize('data, index, drop_labels', [
(None, [1, 2, 3], [1, 4]),
(None, [1, 2, 2], [1, 4]),
([2, 3], [0, 1], [False, True])
])
def test_drop_non_empty_list(data, index, drop_labels):
# GH 21494 and GH 16877
with pytest.raises(KeyError, match='not found in axis'):
pd.Series(data=data, index=index).drop(drop_labels)
| bsd-3-clause |
elkingtonmcb/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
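# One way to complete the tasks above -- a minimal sketch rather than the
# canonical solution; the vectorizer settings, the classifier and the
# parameter grid below are illustrative assumptions:
pipeline = Pipeline([
    ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    ('clf', LinearSVC(C=1000)),
])
parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# cross-validated scores for each parameter set explored by the grid search
for params, mean_score, scores in grid_search.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r"
          % (mean_score, scores.std() * 2, params))
y_predicted = grid_search.predict(docs_test)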
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
apatti/apatti_ml | kaggle/digit-recognizer/knn.py | 1 | 2786 | from numpy import *
import matplotlib
import matplotlib.pyplot as plt
# Euclidean distance: d(x, x_i) = sqrt(sum((x - x_i)**2))
def classify(input,data,labels,k):
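"""Return the majority label among the k nearest neighbours of `input`
in `data` (Euclidean distance)."""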
#converting input array to be matrix of same size as data so that we can use it for subtract
diffMat = tile(input,(data.shape[0],1)) - data
sqDiffMat = diffMat**2
sumSqDiffMat = sum(sqDiffMat,axis=1)
distances = sumSqDiffMat**0.5
sortedIndices = distances.argsort() # sort the indexes
votedLabels={}
for i in range(k):
votedLabels[labels[sortedIndices[i]]] = votedLabels.get(labels[sortedIndices[i]],0)+1
return max(votedLabels.iteritems(),key=lambda x:x[1])[0]
def fileToMat(file,labelsPresent):
labels=None
fileData = genfromtxt(file,delimiter=',',skip_header=1,dtype="int")
if labelsPresent:
labels = fileData[:,0]
fileData=fileData[:,1:]
return fileData,labels
def visualize(data,labels):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(data[:,0],data[:,1],15.0*labels,15.0*labels)
ax.set_ylabel("Percentage of time spent playing video game")
ax.set_xlabel("Liters of icecream consumed per week")
ax.set_title("Scatter Plot")
#plt.legend(loc='upper center')
plt.show()
def normalize(data):
min = data.min(0)
max = data.max(0)
range = max-min
normData = (data-tile(min,(data.shape[0],1)))/tile(range,(data.shape[0],1))
return normData,range,min
def testClassifier():
data,labels = fileToMat("data/train.csv",True)
#normData,ranges,min = normalize(data)
testPercent=0.1
#numTestVectors=int(testPercent*normData.shape[0])
numTestVectors = 3000
for k in range(1):
errorCount=0.0
for i in range(numTestVectors):
classifiedLabel = classify(data[i],data[numTestVectors:9000,],labels[numTestVectors:9000,],77)
#print r'Test:%d,Actual:%d' %(classifiedLabel,labels[i])
if(classifiedLabel!=labels[i]):
errorCount=errorCount+1.0
print r'K:%d,Error Rate:%f'%(k,((errorCount/float(numTestVectors))*100))
def digitRecognizer():
trainData,labels = fileToMat("data/train.csv",True)
testData,trainLabels = fileToMat("data/test.csv",False)
classifiedResult = zeros((testData.shape[0],2))
#with open("testResult.csv","w") as outputFile:
for i in range(testData.shape[0]):
#classifierResult = classify(testData[i],trainData,labels,3)
#outputFile.write("%d,%d\n"%(i+1,classifierResult))
classifiedResult[i,0]=i+1
classifiedResult[i,1]=classify(testData[i],trainData,labels,3)
print "%d "%(i+1)
savetxt("testResult_apatti_3.csv",classifiedResult,delimiter=',',fmt="%d,%d",header='ImageId,Label')
if __name__ == "__main__":
digitRecognizer()
| mit |
CharlesGulian/Deconv | Main3.py | 1 | 13448 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 30 15:34:39 2016
@author: charlesgulian
"""
import os
curr_dir = os.getcwd()
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib
import pysex
import sex_stats
import fits_tools
#import sex_config
#import do_config
# Image deconvolution project:
# Main script for data analysis, image comparison, photometric statistics, and more
# Good image comparison
goodImage1 = 'AstroImages/Good/fpC-6484-x4078-y134_stitched_alignCropped.fits'
goodImage2 = 'AstroImages/Good/fpC-7006-x5226-y115_stitched_alignCropped.fits'
goodImage3 = 'AstroImages/Good/fpC-4868-x4211-y138_stitched_alignCropped.fits'
goodImage4 = 'AstroImages/Good/fpC-6383-x5176-y121_stitched_alignCropped.fits'
goodImgs = [goodImage1,goodImage2,goodImage3,goodImage4]
# Bad image comparison:
badImage1 = 'AstroImages/Bad/fpC-5759-x24775-y300_stitched_alignCropped.fits' # Modest gradient from top to bottom
badImage2 = 'AstroImages/Bad/fpC-6548-x24940-y302_stitched_alignCropped.fits' # Modest gradient from top to bottom
badImage3 = 'AstroImages/Bad/fpC-5781-x25627-y293_stitched_alignCropped.fits' # Very weak gradient from bottom left to top right
badImage4 = 'AstroImages/Bad/fpC-7140-x24755-y270_stitched_alignCropped.fits' # Weak gradient from bottom left to top right
badImgs = [badImage1,badImage2,badImage3,badImage4]
def createHist(data,numBins=20,color='green',dataName='',save=True,show=False,normed=False):
# Plot histogram of data
plt.hist(data,bins=numBins,normed=normed,color=color,stacked=True)
if dataName != '':
plt.title('Histogram of {}'.format(dataName))
plt.ylabel('Frequency (N)')
plt.xlabel(dataName)
if save:
hist_path = os.path.join(curr_dir,'Figures','Jul15Imgs','Hists','Hist_{}__.png'.format(dataName))
print 'Saving histogram to {}'.format(hist_path)
plt.savefig(hist_path)
plt.close()
if show:
plt.show()
for testImage1 in goodImgs:
for testImage2 in goodImgs:
if testImage1 == testImage2:
continue
output = pysex.compare(testImage1,testImage2) # (Implement/uncomment to create new comparison file)
img_tag1 = (os.path.split(testImage1)[1])
img_tag1 = img_tag1[0:len(img_tag1)-len('.fits')]
img_tag2 = (os.path.split(testImage2)[1])
img_tag2 = img_tag2[0:len(img_tag2)-len('.fits')]
outputCat1 = os.path.join(os.getcwd(),'Results',img_tag1+'_'+img_tag1+'_compare.cat')
if not os.path.exists(outputCat1):
print 'Error: first output catalog path does not exist'
outputCat2 = os.path.join(os.getcwd(),'Results',img_tag1+'_'+img_tag2+'_compare.cat')
if not os.path.exists(outputCat2):
print 'Error: second output catalog path does not exist'
# Create sex_stats.data objects:
img1data = sex_stats.data(outputCat1)
img2data = sex_stats.data(outputCat2)
# Create .reg files from output catalogs
CREATE_regFiles = False
if CREATE_regFiles:
img1data.create_regFile()
img2data.create_regFile()
#-----------------------------------------------------------------------------#
# Flux ratio analysis:
flux1,flux2 = img1data.get_data('FLUX_BEST'),img2data.get_data('FLUX_BEST')
#
mag1,mag2 = img1data.get_data('MAG_BEST'),img2data.get_data('MAG_BEST')
#flux1,flux2 = mag1,mag2
x,y = img1data.get_data('X_IMAGE'),img1data.get_data('Y_IMAGE')
flux1,flux2 = np.array(flux1),np.array(flux2)
# Correct for image bias of 1000.0
imageBias = 1000.0
flux1 -= imageBias
flux2 -= imageBias
#'''
print ' '
print 'Minimum flux values: ', np.min(flux1),' ',np.min(flux2)
print 'Minimum pixel values: ', np.min(fits_tools.getPixels(testImage1)),' ',np.min(fits_tools.getPixels(testImage2))
print 'Median value of images: ', np.median(fits_tools.getPixels(testImage1)),' ',np.median(fits_tools.getPixels(testImage2))
print 'Mean value of images: ', np.mean(fits_tools.getPixels(testImage1)),' ',np.mean(fits_tools.getPixels(testImage2))
print ' '
#'''
#'''
fluxAvg = 0.5*(flux1+flux2)
fluxRatio = np.divide(flux1,flux2)
fluxRatio_mean = np.mean(fluxRatio)
fluxRatio_std = np.std(fluxRatio)
fluxRatio_meanSubtracted = fluxRatio - fluxRatio_mean # (NOT MEAN SUBTRACTED)
#'''
# Without sigma-clipping
# Creating histogram of flux1 and flux2
createHist(img1data.get_data('MAG_BEST'),numBins=70,color='green',save=False,normed=True)
createHist(img2data.get_data('MAG_BEST'),numBins=70,color='blue',dataName='Object mag for Entire Image',save=True,normed=True)
# Creating histogram of flux1/flux2 (object-wise flux ratio)
createHist(fluxRatio,numBins=70,dataName='Flux Ratio')
m,n = 4,4
xBins,yBins,fluxRatioBins = sex_stats.binData(x,y,fluxRatio,M=m,N=n)
for i in range(m):
for j in range(n):
if (i == max(range(m))) and (j == max(range(n))):
plt.title('Histogram of Flux Ratio in Bins: {}x{}'.format(m,n))
plt.subplot(m,n,(n*i + (j+1)))
plt.axis([np.min(fluxRatioBins[i,j]),np.max(fluxRatioBins[i,j]),0.0,10.0])
createHist(fluxRatioBins[i,j],save=True,normed=False)
else:
plt.subplot(m,n,(n*i + (j+1)))
createHist(fluxRatioBins[i,j],save=False,normed=False)
plt.axis([np.min(fluxRatioBins[i,j]),np.max(fluxRatioBins[i,j]),0.0,10.0])
'''
fluxRatioBin_Avgs = np.zeros([m,n])
emptyBins = []
for i in range(m):
for j in range(n):
# Clipping data in bins:
fluxRatioBins_sigmaClipped = []
fluxRatioBins_excess = []
for k in range(len(fluxRatioBins[i,j])):
if np.abs((fluxRatioBins[i,j])[k]) <= maxSig[s]*np.std(fluxRatioBins[i,j]):
fluxRatioBins_sigmaClipped.append(fluxRatioBins[i,j][k])
else:
fluxRatioBins_excess.append()
if len(fluxRatioBins_sigmaClipped) == 0:
emptyBins.append('{},{}'.format(str(i),str(j)))
fluxRatioBins[i,j] = fluxRatioBins_sigmaClipped
fluxRatioBin_Avgs[i,j] = np.mean(fluxRatioBins_sigmaClipped)
# Masking NaNs in fluxRatioBin_Avgs:
fluxRatioBin_Avgs_Masked = np.ma.array(fluxRatioBin_Avgs,mask=np.isnan(fluxRatioBin_Avgs))
cmap = matplotlib.cm.gray
cmap.set_bad('r',1.)
#print np.nanmean(fluxRatioBin_Avgs)-2.0,' ',np.nanmean(fluxRatioBin_Avgs)+2.0
plt.pcolormesh(fluxRatioBin_Avgs_Masked,cmap=cmap,vmin=np.nanmean(fluxRatioBin_Avgs)-2.0,vmax=np.nanmean(fluxRatioBin_Avgs)+2.0)
plt.colorbar()
plt.xlabel('X Bin')
plt.ylabel('Y Bin')
plt.title('Flux Ratio Bin Averages: {} x {}'.format(m,n))
if not os.path.exists(os.path.join(curr_dir,'Figures','Jul14Imgs','ObjBin','{}_{}'.format(img_tag1[0:10],img_tag2[0:10]))):
os.mkdir(os.path.join(curr_dir,'Figures','Jul14Imgs','ObjBin','{}_{}'.format(img_tag1[0:10],img_tag2[0:10])))
plt.savefig(os.path.join(curr_dir,'Figures','Jul14Imgs','ObjBin','{}_{}'.format(img_tag1[0:10],img_tag2[0:10]),'fluxRatioBin_Avgs_sigmaClip{}.png'.format(str(maxSig[s])[0:4])))
plt.close()
plot = False # Warning: do not change to true unless length of maxSig small
if plot:
# Plotting source-wise flux ratio w/ colors
plt.scatter(x_clip, y_clip, s=25*np.log10(0.1*np.array(fluxAvg_clip)), c=fluxRatio_meanSubtracted_sigmaClipped, vmin=-1.5*maxSig[j]*fluxRatio_std, vmax=1.5*maxSig[j]*fluxRatio_std, alpha=0.75)
plt.axis([0,1600,0,1600])
plt.colorbar()
plt.xlabel('X_IMAGE')
plt.ylabel('Y_IMAGE')
plt.title('Flux Ratio Color Map: sigma cutoff = '+str(maxSig[j])[0:4])
plt.savefig((curr_dir+'/Figures/{}_{}_maxSig{}_fluxRatio_LINETEST.png'.format(img_tag1, img_tag2, str(maxSig[j])[0:4])))
plt.close()
#'''
break
break
""" THIS SECTION OF CODE WAS COMMENTED OUT ON July 12th, 2016; uncomment to do statistical analysis
chiSqNorm_linear = []
chiSqNorm_flat = []
rSqAdj = []
numPoints = []
for j in range(len(maxSig)):
# Clipping data
fluxRatio_meanSubtracted_sigmaClipped = []
fluxRatio_excess = []
fluxAvg_clip = []
x_clip,y_clip = [],[]
x_exc,y_exc = [],[]
for i in range(len(fluxRatio_meanSubtracted)):
if np.abs(fluxRatio_meanSubtracted[i]) < maxSig[j]*fluxRatio_std:
fluxRatio_meanSubtracted_sigmaClipped.append(fluxRatio_meanSubtracted[i])
x_clip.append(x[i])
y_clip.append(y[i])
fluxAvg_clip.append(fluxAvg[i])
else:
fluxRatio_excess.append(fluxRatio_meanSubtracted[i])
x_exc.append(x[i])
y_exc.append(y[i])
fluxRatio_meanSubtracted_sigmaClipped,fluxRatio_excess = np.array(fluxRatio_meanSubtracted_sigmaClipped),np.array(fluxRatio_excess)
x_clip,y_clip,x_exc,y_exc = np.array(x_clip),np.array(y_clip),np.array(x_exc),np.array(y_exc)
numPoints.append(float(len(x_clip)))
# Analyzing goodness-of-fit of 3D linear model fitted to data:
coeffs = sex_stats.linReg3D(x_clip,y_clip,fluxRatio_meanSubtracted_sigmaClipped)[0]
linearModelPoints = coeffs[0] + coeffs[1]*x_clip + coeffs[2]*y_clip
flatModelPoints = np.ones(np.shape(fluxRatio_meanSubtracted_sigmaClipped))*fluxRatio_mean
# SciPy: scipy.stats.chisquare
#CSN_lin = spst.chisquare()
CSN_lin = sex_stats.chiSquareNormalized(fluxRatio_meanSubtracted_sigmaClipped,linearModelPoints,3)
CSN_flat = sex_stats.chiSquareNormalized(fluxRatio_meanSubtracted_sigmaClipped,flatModelPoints,1)
RSA = sex_stats.rSquaredAdjusted(fluxRatio_meanSubtracted_sigmaClipped,linearModelPoints,3)
chiSqNorm_linear.append(CSN_lin)
chiSqNorm_flat.append(CSN_flat)
rSqAdj.append(RSA)
plot = True # Warning: do not change to true unless length of maxSig small
if plot:
# Plotting source-wise flux ratio w/ colors
plt.scatter(x_clip, y_clip, s=25*np.log10(0.1*np.array(fluxAvg_clip)), c=fluxRatio_meanSubtracted_sigmaClipped, vmin=-1.5*maxSig[j]*fluxRatio_std, vmax=1.5*maxSig[j]*fluxRatio_std, alpha=0.75)
plt.axis([0,1600,0,1600])
plt.colorbar()
plt.xlabel('X_IMAGE')
plt.ylabel('Y_IMAGE')
plt.title('Flux Ratio Color Map: sigma cutoff = '+str(maxSig[j])[0:4])
plt.savefig((curr_dir+'/Figures/{}_{}_maxSig{}_fluxRatio_LINETEST.png'.format(img_tag1, img_tag2, str(maxSig[j])[0:4])))
plt.close()
hist = False # Warning: do not change to true unless length of maxSig small
if hist:
# Plotting histogram of flux ratio
plt.hist(fluxRatio_meanSubtracted_sigmaClipped,bins=20,color='green')
plt.title('Histogram of Flux Ratio')
plt.ylabel(('Mean subtracted + clipped @ {} sigma').format(str(maxSig[j])[0:4]))
plt.xlabel('Flux ratio')
plt.savefig((curr_dir+'/Figures/Hist_{}_{}_maxSig{}_fluxRatio.png'.format(img_tag1, img_tag2, str(maxSig[j])[0:4])))
plt.close()
# Changing lists to NumPy arrays:
# chiSqNorm_linear,chiSqNorm_flat,rSqAdj = np.array(chiSqNorm_linear),np.array(chiSqNorm_flat),np.array(rSqAdj)
# Number of data points analyzed:
numPoints = np.array(numPoints)
numPoints = numPoints*(1.0/float(len(fluxRatio)))
# Plotting reduced chi-square statistic
plt.close()
plt.plot(maxSig,chiSqNorm_linear,'r-',label='Linear model')
plt.plot(maxSig,chiSqNorm_flat,'b-',label='Flat model')
plt.plot(maxSig,numPoints,'0.35',label='Frac. of data points')
plt.legend()
plt.axis([-0.1,1.0,0.0,3.0])
plt.title('Normalized Chi-square vs. Sigma Cutoff: 3D Linear Eq. + Gaussian Noise Test')
plt.ylabel('Normalized Chi-square: Linear')
plt.xlabel('Sigma Cutoff (# standard deviations from mean)')
plt.ylabel('Normalized Chi-square')
#plt.savefig(os.path.join(os.getcwd(),'Figures','StatAnalysis','Linear_eq_test_6'))
plt.show()
'''
# Plotting adjusted r-squared statistic
plt.plot(maxSig,rSqAdj,'k-')
plt.axis([0.0,1.0,-1.1,1.1])
plt.title('Adjusted r-Squared vs. Sigma Cutoff')
plt.xlabel('Sigma Cutoff (# standard deviations from mean)')
plt.ylabel('Adjusted r-Squared')
plt.show()
'''
"""
''' # Testing with artificial data: random noise and linear equations with noise
# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:
#temp = np.std(flux1)*np.random.randn(flux1.size) + np.mean(flux1)
#flux1 = temp
#flux1 = np.mean(flux1)*np.ones(flux1.size) + np.random.randn(flux1.size)
#temp = (np.mean(flux2)/np.mean(flux1))*flux1 + np.std(flux2)*np.random.randn(flux2.size)
#flux2 = temp
#flux2 += 10.0*(x + y - np.mean(x) - np.mean(y))
# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:# TESTING:
#'''
| gpl-3.0 |
wenzheli/python_new | com/uva/graph.py | 1 | 4731 | import matplotlib.pyplot as plt
import math
from pylab import *
import numpy as np
from numpy import convolve
def movingaverage (values, window):
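"""Return the simple moving average of `values` over a sliding `window`
(NumPy convolution in 'valid' mode)."""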
weights = np.repeat(1.0, window)/window
sma = np.convolve(values, weights, 'valid')
return sma
mcmc= open("/home/liwenzhe/workspace/SGRLDForMMSB/results/Netscience_k100_400/result_mcmc.txt", 'r')
svi = open("/home/liwenzhe/workspace/SGRLDForMMSB/results/Netscience_k100_400/result_svi.txt", 'r')
lines_mcmc = mcmc.readlines()
lines_svi = svi.readlines()
n = len(lines_mcmc)
m = len(lines_svi)
ppx_mcmc = np.zeros(n)
ppx_svi = np.zeros(n)
for i in range(0,n):
strs_mcmc = lines_mcmc[i].split()
ppx_mcmc[i] = float(strs_mcmc[0])
if i >= m:
ppx_svi[i] = ppx_svi[m-1]
else:
strs_svi = lines_svi[i].split()
ppx_svi[i] = float(strs_svi[0])
t =arange(0.0, n, 1)
print n
print(len(t))
"""
p1, =plot(t/12, ppx_svi)
p2, =plot(t/12,ppx_mcmc)
legend([p1,p2], ["Stochastic variational inference", "Mini-batch MCMC Sampling"])
xlabel('time (m)')
ylabel('perplexity')
title('Perplexity for relativity data set(using stratified random node sampling ')
grid(True)
savefig("relativity.png")
show()
"""
##############################################################
###### Figure 1 #
##############################################################
gibbs= open("results/testdata_k10/ppx_gibbs_sampler.txt", 'r')
svi = open("results/testdata_k10/ppx_variational_sampler.txt", 'r')
mcmc_online = open("results/testdata_k10/ppx_mcmc_stochastic.txt", 'r')
mcmc_batch=open("results/testdata_k10/ppx_mcmc_batch.txt", 'r')
lines_gibbs = gibbs.readlines()
lines_svi = svi.readlines()
lines_mcmc_online = mcmc_online.readlines()
lines_mcmc_batch = mcmc_batch.readlines()
n1 = len(lines_gibbs)
n2 = len(lines_svi)
n3 = len(lines_mcmc_batch)
n4 = len(lines_mcmc_online)
# plot the gibbs sampler
ppx_gibbs =[]
times_gibbs = np.zeros(n1)
ppx_svi = []
times_svi = np.zeros(n2)
ppx_mcmc_batch = []
times_mcmc_batch=np.zeros(n3)
ppx_mcmc_online = []
times_mcmc_online=np.zeros(n4)
avg_mcmc = []
avg_svi = []
avg_gibbs = []
avg_batch = []
for i in range(0, n1):
strs = lines_gibbs[i].split()
ppx_gibbs.append(float(strs[0]))
avg_gibbs.append(np.mean(ppx_gibbs))
times_gibbs[i] = float(strs[1])
for i in range(0, n2):
strs = lines_svi[i].split()
ppx_svi.append(float(strs[0]))
times_svi[i] = float(strs[1])
avg_svi.append(np.mean(ppx_svi))
for i in range(0, n3):
strs = lines_mcmc_batch[i].split()
ppx_mcmc_batch.append(float(strs[0]))
times_mcmc_batch[i] = float(strs[1])
avg_batch.append(np.mean(ppx_mcmc_batch))
for i in range(0, n4):
strs = lines_mcmc_online[i].split()
ppx_mcmc_online.append(float(strs[0]))
times_mcmc_online[i] = float(strs[1])
avg_mcmc.append(np.mean(ppx_mcmc_online))
figure(1)
p1, =plot(times_gibbs, avg_gibbs)
p2, =plot(times_svi,avg_svi)
p3, =plot(times_mcmc_batch, avg_batch)
p4, =plot(times_mcmc_online, avg_mcmc)
legend([p1,p2,p3,p4], ["Collapsed Gibbs Sampler", "Stochastic Variational Inference","Batch MCMC", "Mini-batch MCMC"])
xlabel('time (s)')
ylabel('perplexity')
title('Perplexity for testing data set')
xlim([1,1000])
grid(True)
savefig("small_data_4_methods_k10.png")
show()
#####################################################
### Figure 2 #
#####################################################
svi = open("ppx_gibbs_sampler.txt", 'r')
mcmc_online = open("ppx_mcmc_stochastic.txt", 'r')
lines_svi = svi.readlines()
lines_mcmc_online = mcmc_online.readlines()
n2 = len(lines_svi)
n4 = len(lines_mcmc_online)
ppx_svi = []
times_svi = np.zeros(n2)
ppx_mcmc_online = []
times_mcmc_online=np.zeros(n4)
avg_mcmc = []
avg_svi = []
for i in range(0, n2):
strs = lines_svi[i].split()
ppx_svi.append(float(strs[0])+1.3)
times_svi[i] = i*10;
avg_svi.append(np.mean(ppx_svi))
for i in range(0, n4):
strs = lines_mcmc_online[i].split()
ppx_mcmc_online.append(float(strs[0])+1.3)
times_mcmc_online[i] = float(strs[1]);
avg_mcmc.append(np.mean(ppx_mcmc_online))
axis_font = {'size':'18'}
params = {'legend.fontsize': 18,
'legend.linewidth': 2}
plt.figure()
plt.rcParams.update(params)
p2, =plot(times_svi ,avg_svi,'r',linewidth=3.0)
p4, =plot(times_mcmc_online, avg_mcmc,'b',linewidth=3.0)
plt.legend(loc=2,prop={'fontsize':18})
legend([p4,p2], ["Stochastic mini-batch MCMC","Collapsed Gibbs Sampler"])
xlabel('time (seconds)',**axis_font)
ylabel('perplexity',**axis_font)
plt.title('US-air Data (K=15)',**axis_font)
xlim([0,1600])
ylim([2,10])
grid(True)
savefig("us_air_mcmc_gibbs.png")
show()
| gpl-3.0 |
Diviyan-Kalainathan/causal-humans | Preprocessing/plot_gen.py | 1 | 4388 | '''
Generate plots from var info
24/05/2016
'''
import csv, numpy
import matplotlib.pyplot as plt
num_bool = []
spec_note = []
type_var = []
color_type = []
category = []
obj_subj=[]
category_type = []
obj_subj_type =[]
mode=2
flags=False
if mode==1:
with open('input/Variables_info.csv', 'rb') as datafile:
var_reader = csv.reader(datafile, delimiter=',')
header_var = next(var_reader)
for var_row in var_reader:
type_var += [var_row[1]]
num_bool += [var_row[3]]
spec_note += [var_row[4]]
category += [int(var_row[5])]
obj_subj += [var_row[6]]
row_len=0
percent_obj=numpy.zeros((8))
percent_subj=numpy.zeros((8))
for num_col in range(0, 541):
if spec_note[num_col] != 'I':
if type_var[num_col] == 'C' or (type_var[num_col] == 'D' and spec_note[num_col] == 'T'):
row_len += 2 #
color_type += ['C']
color_type += ['FC']
category_type += [category[num_col], category[num_col]]
obj_subj_type += [obj_subj[num_col], [obj_subj[num_col]]]
elif type_var[num_col] == 'D' and spec_note[num_col] != '-2' and spec_note[num_col] != 'T':
# print(num_col)
row_len += int(num_bool[num_col]) + 1
for i in range(0, int(num_bool[num_col])):
color_type += ['D']
category_type += [category[num_col]]
obj_subj_type += [obj_subj[num_col]]
color_type += ['FD']
category_type += [category[num_col]]
obj_subj_type += [obj_subj[num_col]]
total = len(category)
print 'Objectives :' , obj_subj.count('O')
print 'Subjectives :' , obj_subj.count('S')
'''
total = len(category_type)
for i in range(8):
sum_obj=0
sum_subj=0
for j in [j for j, x in enumerate(category_type) if x == i]:
if obj_subj_type[j]=='O':
sum_obj+=1
elif obj_subj_type[j]=='S':
sum_subj+=1
percent_obj[i]= (float(sum_obj)/total)*100
percent_subj[i]=(float(sum_subj)/total)*100
if not flags:
percent_obj=percent_obj[1:]
percent_subj=percent_subj[1:]
'''
for i in range(8):
sum_obj=0
sum_subj=0
for j in [j for j, x in enumerate(category) if x == i]:
if obj_subj[j]=='O':
sum_obj+=1
elif obj_subj[j]=='S':
sum_subj+=1
percent_obj[i]= (float(sum_obj)/total)*100
percent_subj[i]=(float(sum_subj)/total)*100
if not flags:
percent_obj=percent_obj[1:]
percent_subj=percent_subj[1:]
N=7
ind = numpy.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, percent_obj, width, color='b')
rects2 = ax.bar(ind + width, percent_subj, width, color='r')
# add some text for labels, title and axes ticks
ax.set_ylabel('Proportion des types de questions (%)')
ax.set_title('Proportion des types de questions en fonction des categories')
ax.set_xticks(ind + width)
ax.set_xticklabels(['Activite\n professionnelle/ \n statut', 'Organisation du \ntemps de travail'
, 'Contraintes \nphysiques, \nprevention et accidents', 'Organisation du travail'
, 'Sante', 'Parcours familial \net professionnel', 'Risques \n pyschosociaux'])#'Drapeaux',
ax.legend((rects1[0], rects2[0]), ('Objectives', 'Subjectives'))
elif mode==2:
with open('input/datacsv.csv','rb') as inputfile:
reader=csv.reader(inputfile,delimiter=';')
header=next(reader)
print(len(header))
dataind=[]
count=0
for row in reader:
count+=1
for data in row:
if data==''or data=='NA':
dataind+=[0]
else:
dataind+=[1]
print(numpy.mean(dataind))
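    # numpy.mean(dataind) is the fraction of non-empty, non-'NA' cells, i.e.
    # an overall completeness rate for the input file. (Note added for
    # readability.)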
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
#autolabel(rects1)
#autolabel(rects2)
plt.show()
| mit |
antiface/mne-python | examples/time_frequency/plot_time_frequency_sensors.py | 7 | 2482 | """
==============================================================
Time-frequency representations on topographies for MEG sensors
==============================================================
Both average power and intertrial coherence are displayed.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import tfr_morlet
from mne.datasets import somato
print(__doc__)
###############################################################################
# Set parameters
data_path = somato.data_path()
raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
event_id, tmin, tmax = 1, -1., 3.
# Setup for reading the raw data
raw = io.Raw(raw_fname)
baseline = (None, 0)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6))
###############################################################################
# Calculate power and intertrial coherence
freqs = np.arange(6, 30, 3) # define frequencies of interest
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
# Baseline correction can be applied to power or done in plots
# To illustrate the baseline correction in plots the next line is commented
# power.apply_baseline(baseline=(-0.5, 0), mode='logratio')
# Inspect power
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio')
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', vmin=-0.45, vmax=0.45)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', vmin=-0.45, vmax=0.45)
mne.viz.tight_layout()
# Inspect ITC
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
| bsd-3-clause |
Erotemic/hotspotter | _scripts/robust_functions.py | 2 | 1500 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
def L2(x):
return x**2
def L1(x):
return np.abs(x)
def Geman_McClure(x, a=1.0):
'a = outlier threshold'
return (x**2) / (1 + (x**2 / a**2))
def Cauchy(x, a=1.0):
    'a = outlier threshold (the original left this function as an empty stub)'
    # one common form of the Cauchy robust loss, filled in here as a sketch
    return (a**2 / 2) * np.log(1 + (x / a)**2)
def Beaton_Tukey(x, a=4.0):
return (a**2)/6 * (1.0 - (1.0 - (x/a)**2)**3)**(np.array(np.abs(x) <= a, dtype=np.float))
#np.array[(a**2)/6 * (1 - (1 - (u/a)**2)**3) if np.abs(u) <= a else (a**2)/6 for u in x]
def Beaton_Tukey_weight(x, a=4.0):
return np.array(np.abs(x) <= a, dtype=np.float) * (1 - (x/a)**2)**2
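# --- illustrative addition (not part of the original script) ----------------
# A minimal sketch of how a weight function such as Beaton_Tukey_weight is
# typically used inside iteratively reweighted least squares (IRLS) to fit a
# straight line while down-weighting outliers. The helper name, the default
# iteration count and the MAD-based scale estimate are assumptions.
def irls_line_fit_sketch(x, y, weight_func=Beaton_Tukey_weight, n_iter=10):
    'fit y ~ slope*x + intercept by IRLS, reweighting residuals each pass'
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    slope, intercept = 0.0, np.median(y)
    A = np.column_stack((x, np.ones_like(x)))
    for _ in range(n_iter):
        residuals = y - (slope * x + intercept)
        # scale residuals by a robust spread estimate before weighting
        scale = np.median(np.abs(residuals)) + 1e-12
        w = weight_func(residuals / scale)
        Aw = A * w[:, None]
        # weighted normal equations: (A^T W A) p = A^T W y
        slope, intercept = np.linalg.solve(Aw.T.dot(A), Aw.T.dot(y))
    return slope, intercept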
def visualize_func(func):
x_radius = 42
x_data = np.linspace(-x_radius,x_radius, 1000)
print(func)
func_name = func.func_name
func_vars = func.func_code.co_varnames
print(func_name)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_title(func_name)
if len(func_vars) == 1 or True:
y_data = func(x_data)
plt.plot(x_data, y_data)
else:
pmax = 1
num = 10
for a in np.linspace(-pmax,pmax,num):
color = plt.get_cmap('jet')((a+pmax)/(pmax*2))
y_data = func(x_data, a)
plt.plot(x_data, y_data, color=color, label=('a=%r' % a))
plt.legend()
fig.show()
if __name__ == '__main__':
robust_functions = [L1,L2,Geman_McClure, Beaton_Tukey, Beaton_Tukey_weight]
for func in iter(robust_functions):
visualize_func(func)
try:
__IPYTHON__
except:
plt.show()
pass
| apache-2.0 |
AllenDowney/ThinkStats2 | code/regression.py | 1 | 10016 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import patsy
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
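# Illustrative usage (not part of the original book code): QuickLeastSquares
# expects NumPy arrays and returns the intercept, slope and mean squared error
# of the least squares fit. The synthetic data below are made up purely for
# demonstration; the function is defined but never called by the module.
def QuickLeastSquaresDemo():
    """Runs QuickLeastSquares on synthetic data and prints the fit."""
    xs = np.linspace(0, 10, 50)
    ys = 3 + 0.5 * xs + np.random.normal(0, 0.2, len(xs))
    inter, slope, mse = QuickLeastSquares(xs, ys)
    print('inter %.3g slope %.3g mse %.3g' % (inter, slope, mse))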
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
MESSAGE = """If you get this error, it's probably because
you are running Python 3 and the nice people who maintain
Patsy have not fixed this problem:
https://github.com/pydata/patsy/issues/34
While we wait, I suggest running this example in
Python 2, or skipping this example."""
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
except patsy.PatsyError:
raise ValueError(MESSAGE)
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.items():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
df['isold'] = (df.agepreg<35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
boland1992/SeisSuite | build/lib.linux-x86_64-2.7/seissuite/response/FDSN_resp.py | 8 | 14082 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 15:38:50 2015
@author: boland
"""
from obspy.fdsn import Client
from obspy import UTCDateTime
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import pickle
import os
from scipy.optimize import fsolve
import pylab
# set range of periods over which seismic noise gives a resolvable signal:
period_range = [1.,40.]
global freq_range
freq_range = [1./max(period_range), 1./min(period_range)]
global acceptible_channels
acceptible_channels = ['BHZ', 'MHZ', 'LHZ', 'VHZ', 'UHZ']
#'BNZ', 'MNZ', 'LNZ', 'VNZ', 'UNZ']
outfolder = '/storage/ANT/NZ Station Responses'
# create list of all possible FDSN clients that work under obspy.
client_list = (u'BGR', u'ETH', u'GEONET', u'GFZ', u'INGV',
u'IPGP', u'IRIS', u'KOERI', u'LMU', u'NCEDC',
u'NEIP', u'NERIES', u'ODC', u'ORFEUS', u'RESIF',
u'SCEDC', u'USGS', u'USP')
client = Client("GEONET")
starttime = UTCDateTime("2014-01-01")
endtime = UTCDateTime("2015-01-01")
inventory = client.get_stations(network="*",
station="*",
loc='*',
channel="*Z",
starttime=starttime,
endtime=endtime,
level="response")
for net in inventory:
print net
for sta in net:
print sta
quit()
# save all response plots
#inventory[0].plot_response(min_freq=1E-4,
# channel="BHZ",
# location="10",
# outfile=None)
#help(inventory[0][0])
# goal: to populate a list of stations with appropriate seismic noise frequency
# response ranges.
def find_sample(response):
"""
Function that can find the sampling rate for a given station.
"""
    for stage in response.response_stages[::-1]:
if (stage.decimation_input_sample_rate is not None and
stage.decimation_factor is not None):
sampling_rate = (stage.decimation_input_sample_rate /
stage.decimation_factor)
break
else:
msg = ("Failed to autodetect sampling rate of channel from "
"response stages. Please manually specify parameter "
"`sampling_rate`")
raise Exception(msg)
return sampling_rate
def get_response(min_freq, response, sampling_rate):
t_samp = 1.0 / sampling_rate
nyquist = sampling_rate / 2.0
nfft = sampling_rate / min_freq
cpx_response, freq = response.get_evalresp_response(
t_samp=t_samp, nfft=nfft)
return cpx_response, freq
def response_window(cpx_response, freq, tolerance=0.7):
"""
Function that can evaluate the response of a given seismic instrument and
return a frequency "window" for which the instrument is most effective.
The lower the tolerance value (must be float between 0 and 1), the larger but
less accurate the frequency window will be.
"""
    # make sure the gain response is a numpy array and work with its
    # magnitude (the evalresp response is complex-valued)
    cpx_response = np.abs(np.asarray(cpx_response))
    # first find the maximum gain response in cpx_response
    max_gain = np.max(cpx_response)
    gain_tol = max_gain * tolerance
    arr2 = np.column_stack((freq, cpx_response))
    # find indices of cpx_response where the gain is above the tolerance
    gain_above = np.argwhere(cpx_response >= gain_tol)
lower_index, upper_index = gain_above[0], gain_above[-1]
arr3 = arr2[lower_index:upper_index]
window = np.vstack((arr3[0], arr3[-1]))
#plt.figure()
#plt.plot(freq, abs(cpx_response))
#plt.plot(arr3[:,0], arr3[:,1], c='r')
#plt.scatter(window[:,0], window[:,1], c='g', s=30)
#plt.show()
return window
def freq_check(freq_range, freq_window):
"""
    Function to return True if any of the frequencies in the window found
    with the response_window function fall within the freq_range set in the
    initial variables of this program.
"""
boolean = False
if any(np.min(freq_range) < freq < np.max(freq_range) \
for freq in freq_window):
boolean = True
return boolean
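# --- illustrative addition (not part of the original script) ----------------
# Small helper sketch showing how the functions above combine for a single
# channel: derive the sampling rate from the response stages, evaluate the
# instrument response, window it, and test that window against the ambient
# noise band defined at the top of the script. The helper name and default
# arguments are assumptions.
def channel_in_noise_band(channel, min_freq=1e-4, tolerance=0.7):
    """
    Return True if the channel's usable response window overlaps freq_range.
    """
    resp = channel.response
    sample_rate = find_sample(resp)
    cpx_response, freq = get_response(min_freq, resp, sample_rate)
    window = response_window(cpx_response, freq, tolerance=tolerance)
    return freq_check(freq_range, window[:, 0])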
def response_plots(inventory, outfolder, acceptible_channels):
min_freq = 1e-4
for net in inventory:
for sta in net:
#print sta.code
channels = sta.channels
for channel in channels:
if str(channel.code) in acceptible_channels:
resp = channel.response
sample_rate = find_sample(resp)
cpx_response, freq = get_response(min_freq, resp, sample_rate)
window = response_window(cpx_response, freq)
#plt.figure()
#plt.loglog(freq, abs(cpx_response))
#plt.plot(window[:,0], window[:,1],'r')
#plt.scatter(freq_range, [ np.max(cpx_response),
# np.max(cpx_response)], c='g', s=35)
#plt.show()
outname = '{}.{}.{}.{}.svg'.format(str(net.code),
str(sta.code),
str(channel.location_code),
str(channel.code))
print outname
outfile = os.path.join(outfolder,outname)
resp.plot(min_freq, outfile = outfile)
freq_window = window[:,0]
print freq_check(freq_range, freq_window)
def resp_in_window(inventory, freq_range, acceptible_channels):
"""
    Function to return a list of all channel codes whose frequency response
    window contains any frequencies in the frequency range specified for the
    given study, e.g. 0.025-1 Hz for current ambient noise studies (2015).
"""
min_freq = 1e-4
chan_codes = []
for net in inventory:
for sta in net:
#print sta.code
channels = sta.channels
for channel in channels:
if str(channel.code) in acceptible_channels:
resp = channel.response
sample_rate = find_sample(resp)
cpx_response, freq = get_response(min_freq,
resp,
sample_rate)
window = response_window(cpx_response, freq)
#plt.figure()
#plt.loglog(freq, abs(cpx_response))
#plt.plot(window[:,0], window[:,1],'r')
#plt.scatter(freq_range, [ np.max(cpx_response),
# np.max(cpx_response)], c='g', s=35)
#plt.show()
freq_window = window[:,0]
check = freq_check(freq_range, freq_window)
chan_code = '{}.{}.{}.{}'.format(str(net.code),
str(sta.code),
str(channel.location_code),
str(channel.code))
if chan_code not in chan_codes and check:
chan_codes.append(chan_code)
return chan_codes
#help(inventory[0][0])
#resp = inventory[0][0][0].response
#print resp
# list of acceptible channels for ambient noise studies
#print inventory.get_contents()['channels']
#print inventory.get_contents().keys()
#for inv in inventory:
# try:
# inv.plot_response(min_freq=1E-4,
# channel="BHZ",
# location="10",
# outfile=None)
def get_latlon(inv, check_channels=False, check_codes=False):
"""
Function to return latitude and longitude coordinates of all stations in
an obspy inventory class object.
"""
lats = []
lons = []
for net in inv:
for sta in net:
label_ = " " + ".".join((net.code, sta.code))
if sta.latitude is None or sta.longitude is None:
msg = ("Station '%s' does not have latitude/longitude "
"information and will not be plotted." % label_)
print msg
continue
for channel in sta.channels:
# perform another loop to check if the channels for the station contain
# any of the acceptible channels for ambient noise tomography.
if check_channels:
channels = sta.channels
channel_list = []
for channel in channels:
channel_list.append(channel.code)
if any(item in acceptible_channels for item in channel_list):
lats.append(sta.latitude)
lons.append(sta.longitude)
elif check_codes:
chan_codes = resp_in_window(inv,
freq_range,
acceptible_channels)
chan_code = '{}.{}.{}.{}'.format(str(net.code),
str(sta.code),
str(channel.location_code),
str(channel.code))
if chan_code in chan_codes:
lats.append(sta.latitude)
lons.append(sta.longitude)
elif check_codes and check_channels:
chan_codes = resp_in_window(inv,
freq_range,
acceptible_channels)
chan_code = '{}.{}.{}.{}'.format(str(net.code),
str(sta.code),
str(channel.location_code),
str(channel.code))
channels = sta.channels
channel_list = []
for channel in channels:
channel_list.append(channel.code)
if chan_code in chan_codes and \
any(item in acceptible_channels for item in channel_list):
lats.append(sta.latitude)
lons.append(sta.longitude)
else:
lats.append(sta.latitude)
lons.append(sta.longitude)
return np.column_stack((lons, lats))
coords_original = get_latlon(inventory, check_channels=False)
coords_checkchannels = get_latlon(inventory, check_channels=True)
coords_checkfreq = get_latlon(inventory, check_codes=True)
coords_combcheck = get_latlon(inventory, check_channels=True, check_codes=True)
# set boundaries
bbox = [100, 179, -50, -30]
def remove_coords(coordinates, bbox):
"""
Function that removes coordinates from outside of a specified bbox.
coordinates: (2,N) numpy array, or python array or list
bbox: [xmin, xmax, ymin, ymax]
"""
xmin, xmax, ymin, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
#convert to python list
coords = list(coordinates)
for i, coord in enumerate(coords):
if coord[0] < xmin or coord[0] > xmax or \
coord[1] < ymin or coord[1] > ymax:
del coords[i]
return np.asarray(coords)
coords_original = remove_coords(coords_original, bbox)
fig1 = plt.figure(figsize=(15,15))
plt.title('Locations of All Available NZ Geonet Seismic Stations')
plt.ylabel('Latitude (Degrees)')
plt.xlabel('Longitude (Degrees)')
plt.scatter(coords_original[:,0], coords_original[:,1])
fig1.savefig('NZ_Geonet_Stations.svg', format='SVG')
plt.clf()
coords_checkchannels = remove_coords(coords_checkchannels, bbox)
fig2 = plt.figure(figsize=(15,15))
plt.title('Locations of All NZ Geonet Seismic Stations \n \
with Chosen List of Channel Names')
plt.ylabel('Latitude (Degrees)')
plt.xlabel('Longitude (Degrees)')
plt.scatter(coords_checkchannels[:,0], coords_checkchannels[:,1])
fig2.savefig('NZ_coords_checkchannels_Geonet_Stations.svg', format='SVG')
plt.clf()
coords_checkfreq = remove_coords(coords_checkfreq, bbox)
fig3 = plt.figure(figsize=(15,15))
plt.title('Locations of All NZ Geonet Seismic Stations \n \
within Top 30% of Instrument Frequency Response Range')
plt.ylabel('Latitude (Degrees)')
plt.xlabel('Longitude (Degrees)')
plt.scatter(coords_checkfreq[:,0], coords_checkfreq[:,1])
fig3.savefig('NZ_coords_checkfreq_Geonet_Stations.svg', format='SVG')
plt.clf()
coords_combcheck = remove_coords(coords_combcheck, bbox)
fig4 = plt.figure(figsize=(15,15))
plt.title('Locations of All NZ Geonet Seismic Stations \n \
with Combined Channel List and Frequency Response Range Checks')
plt.ylabel('Latitude (Degrees)')
plt.xlabel('Longitude (Degrees)')
plt.scatter(coords_combcheck[:,0], coords_combcheck[:,1])
fig4.savefig('NZ_coords_combcheck_Geonet_Stations.svg', format='SVG')
plt.clf()
# mainland New Zealand Geonet 2014 operational station locations
NZ_COORDS = coords_combcheck
with open(u'NZ_COORDS.pickle', 'wb') as f:
pickle.dump(NZ_COORDS, f, protocol=2) | gpl-3.0 |
saifrahmed/bokeh | bokeh/tests/test_protocol.py | 42 | 3959 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh.protocol import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_panda_series(self):
s = pd.Series([1, 3, 5, 6, 8])
self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
def test_numpyarray(self):
a = np.arange(5)
self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
self.assertEqual(self.encoder.default(npint), 1)
self.assertIsInstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
self.assertEqual(self.encoder.default(npfloat), 1.33)
self.assertIsInstance(self.encoder.default(npfloat), float)
def test_numpybool_(self):
nptrue = np.bool_(True)
self.assertEqual(self.encoder.default(nptrue), True)
self.assertIsInstance(self.encoder.default(nptrue), bool)
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_pd_timestamp(self):
ts = pd.tslib.Timestamp('April 28, 1948')
self.assertEqual(self.encoder.default(ts), -684115200000)
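        # (Timestamps are encoded as milliseconds relative to the Unix epoch,
        # so dates before 1970, like this one, serialize to negative values.)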
class TestSerializeJson(unittest.TestCase):
def setUp(self):
from bokeh.protocol import serialize_json, deserialize_json
self.serialize = serialize_json
self.deserialize = deserialize_json
def test_with_basic(self):
self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
def test_with_np_array(self):
a = np.arange(5)
self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_with_pd_series(self):
s = pd.Series([0, 1, 2, 3, 4])
self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_nans_and_infs_pandas(self):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_datetime_types(self):
"""should convert to millis
"""
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
shoyer/xray | xarray/tests/test_concat.py | 1 | 13577 | from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable, concat
from xarray.core import dtypes
from . import (
InaccessibleArray, assert_array_equal,
assert_equal, assert_identical, raises_regex, requires_dask)
from .test_dataset import create_test_data
class TestConcatDataset(object):
def test_concat(self):
# TODO: simplify and split this test case
# drop the third dimension to keep things relatively understandable
data = create_test_data()
for k in list(data.variables):
if 'dim3' in data[k].dims:
del data[k]
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
assert_identical(data, concat(split_data, 'dim1'))
def rectify_dim_order(dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(dict((k, v.transpose(*data[k].dims))
for k, v in dataset.data_vars.items()),
dataset.coords, attrs=dataset.attrs)
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
assert_identical(data, concat(datasets, dim))
dim = 'dim2'
assert_identical(
data, concat(datasets, data[dim]))
assert_identical(
data, concat(datasets, data[dim], coords='minimal'))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in data.coords.items()
if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
assert_identical(data, rectify_dim_order(actual))
actual = concat(datasets, data[dim], coords='different')
assert_identical(data, rectify_dim_order(actual))
# make sure the coords argument behaves as expected
data.coords['extra'] = ('dim4', np.arange(3))
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords='all')
expected = np.array([data['extra'].values
for _ in range(data.dims[dim])])
assert_array_equal(actual['extra'].values, expected)
actual = concat(datasets, data[dim], coords='different')
assert_equal(data['extra'], actual['extra'])
actual = concat(datasets, data[dim], coords='minimal')
assert_equal(data['extra'], actual['extra'])
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data['dim1']).rename('dim1')
datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
expected = data.copy()
expected['dim1'] = dim
assert_identical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ['minimal', 'different', 'all', [], ['foo']]:
actual = concat(objs, dim='x', data_vars=data_vars)
assert_identical(data, actual)
def test_concat_coords(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
objs = [data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1)]
for coords in ['different', 'all', ['c']]:
actual = concat(objs, dim='x', coords=coords)
assert_identical(expected, actual)
for coords in ['minimal', []]:
with raises_regex(ValueError, 'not equal across'):
concat(objs, dim='x', coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({'foo': 1.5}, {'y': 1})
ds2 = Dataset({'foo': 2.5}, {'y': 1})
expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
for mode in ['different', 'all', ['foo']]:
actual = concat([ds1, ds2], 'y', data_vars=mode)
assert_identical(expected, actual)
with raises_regex(ValueError, 'not equal across datasets'):
concat([ds1, ds2], 'y', data_vars='minimal')
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, 'dim1')
assert_identical(data, actual)
actual = concat(split_data[::-1], 'dim1')
assert_identical(data, actual)
def test_concat_autoalign(self):
ds1 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 2])])})
ds2 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 3])])})
actual = concat([ds1, ds2], 'y')
expected = Dataset({'foo': DataArray([[1, 2, np.nan], [1, np.nan, 2]],
dims=['y', 'x'],
coords={'x': [1, 2, 3]})})
assert_identical(expected, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
with raises_regex(ValueError, 'must supply at least one'):
concat([], 'dim1')
with raises_regex(ValueError, 'are not coordinates'):
concat([data, data], 'new_dim', coords=['not_found'])
with raises_regex(ValueError, 'global attributes not'):
data0, data1 = deepcopy(split_data)
data1.attrs['foo'] = 'bar'
concat([data0, data1], 'dim1', compat='identical')
assert_identical(
data, concat([data0, data1], 'dim1', compat='equals'))
with raises_regex(ValueError, 'encountered unexpected'):
data0, data1 = deepcopy(split_data)
data1['foo'] = ('bar', np.random.randn(10))
concat([data0, data1], 'dim1')
with raises_regex(ValueError, 'compat.* invalid'):
concat(split_data, 'dim1', compat='foobar')
with raises_regex(ValueError, 'unexpected value for'):
concat([data, data], 'new_dim', coords='foobar')
with raises_regex(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z')
with raises_regex(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z')
with raises_regex(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', mode='different')
with raises_regex(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', concat_over='different')
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]})
assert_identical(actual, expected)
objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})]
actual = concat(objs, 'x')
assert_identical(actual, expected)
# mixed dims between variables
objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})]
actual = concat(objs, 'x')
expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])})
assert_identical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1]}, {'y': ('x', [-2])})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])})
assert_identical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1, 2]}, {'y': -2})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1, 2]}, {'y': ('x', [-1, -2, -2])})
assert_identical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}),
Dataset({'z': ('y', [1])}, {'x': [1], 'y': [0]})]
actual = concat(objs, 'x')
expected = Dataset({'z': (('x', 'y'), [[-1], [1]])},
{'x': [0, 1], 'y': [0]})
assert_identical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 1, 't': [0]})]
expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
actual = concat(objs, 't')
assert_identical(expected, actual)
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 2, 't': [0]})]
with pytest.raises(ValueError):
concat(objs, 't', coords='minimal')
def test_concat_dim_is_variable(self):
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
coord = Variable('y', [3, 4])
expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
actual = concat(objs, coord)
assert_identical(actual, expected)
def test_concat_multiindex(self):
x = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
expected = Dataset({'x': x})
actual = concat([expected.isel(x=slice(2)),
expected.isel(x=slice(2, None))], 'x')
assert expected.equals(actual)
assert isinstance(actual.x.to_index(), pd.MultiIndex)
@pytest.mark.parametrize('fill_value', [dtypes.NA, 2, 2.0])
def test_concat_fill_value(self, fill_value):
datasets = [Dataset({'a': ('x', [2, 3]), 'x': [1, 2]}),
Dataset({'a': ('x', [1, 2]), 'x': [0, 1]})]
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = Dataset({'a': (('t', 'x'),
[[fill_value, 2, 3],
[1, 2, fill_value]])},
{'x': [0, 1, 2]})
actual = concat(datasets, dim='t', fill_value=fill_value)
assert_identical(actual, expected)
class TestConcatDataArray(object):
def test_concat(self):
ds = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
'bar': (['x', 'y'], np.random.random((2, 3)))},
{'x': [0, 1]})
foo = ds['foo']
bar = ds['bar']
# from dataset array:
expected = DataArray(np.array([foo.values, bar.values]),
dims=['w', 'x', 'y'], coords={'x': [0, 1]})
actual = concat([foo, bar], 'w')
assert_equal(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby('x')]
stacked = concat(grouped, ds['x'])
assert_identical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes['x'])
assert_identical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])
).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
assert_identical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
assert_identical(expected, actual)
with raises_regex(ValueError, 'not identical'):
concat([foo, bar], dim='w', compat='identical')
with raises_regex(ValueError, 'not a valid argument'):
concat([foo, bar], dim='w', data_vars='minimal')
def test_concat_encoding(self):
# Regression test for GH1297
ds = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
'bar': (['x', 'y'], np.random.random((2, 3)))},
{'x': [0, 1]})
foo = ds['foo']
foo.encoding = {"complevel": 5}
ds.encoding = {"unlimited_dims": 'x'}
assert concat([foo, foo], dim="x").encoding == foo.encoding
assert concat([ds, ds], dim="x").encoding == ds.encoding
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
dims=['x', 'y']) for _ in range(2)]
# should not raise
combined = concat(arrays, dim='z')
assert combined.shape == (2, 3, 3)
assert combined.dims == ('z', 'x', 'y')
@pytest.mark.parametrize('fill_value', [dtypes.NA, 2, 2.0])
def test_concat_fill_value(self, fill_value):
foo = DataArray([1, 2], coords=[('x', [1, 2])])
bar = DataArray([1, 2], coords=[('x', [1, 3])])
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = DataArray([[1, 2, fill_value], [1, fill_value, 2]],
dims=['y', 'x'], coords={'x': [1, 2, 3]})
actual = concat((foo, bar), dim='y', fill_value=fill_value)
assert_identical(actual, expected)
| apache-2.0 |
AnasGhrab/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
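# For illustration (not part of the original module): two estimators of the
# same class get numeric suffixes, e.g. _name_estimators([PCA(), PCA()]) is
# expected to return [('pca-1', ...), ('pca-2', ...)], while
# _name_estimators([PCA(), SVC()]) keeps the plain names 'pca' and 'svc'.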
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
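# Example usage of FeatureUnion (an illustrative sketch mirroring the class
# docstring above; the estimators and parameters chosen here are arbitrary):
#
#     from sklearn.decomposition import PCA
#     from sklearn.feature_selection import SelectKBest
#
#     union = FeatureUnion([("pca", PCA(n_components=2)),
#                           ("kbest", SelectKBest(k=1))])
#     X_features = union.fit_transform(X, y)
#
# The result horizontally stacks the two principal components and the single
# selected feature, giving three output columns per sample.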
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
boisvert42/baseball-for-fun | on_pace/on_pace.py | 1 | 1487 | #%%
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import beta
import numpy as np
#%%
# Read in the CSV
# From https://github.com/chadwickbureau/baseballdatabank/blob/master/core/Teams.csv
r = pd.read_csv(r'Teams.csv')
# Restrict to just post 1961 (162-game schedule)
r = r.loc[r.yearID>=1961]
# Add a column for win percentage
r['WinPct']=r['W']/(r['W']+r['L'])
# Make histogram
ydata,xdata,_ = plt.hist(r['WinPct'],bins=75)
plt.title('Historical Winning Percentages')
plt.xlabel('Win Percentage')
plt.ylabel('Number of Teams')
plt.savefig('winpct.png',dpi=300)
#%%
# Mean and variance of win percentages
mu = r['WinPct'].mean()
v = r['WinPct'].var()
# Estimate alpha and beta from these
# Thanks https://stats.stackexchange.com/a/12239
alpha = mu**2 * ((1-mu)/v - 1/mu)
b = alpha * (1/mu - 1)
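# Method-of-moments derivation behind the two lines above: for a Beta(a, b)
# distribution, mean mu = a / (a + b) and variance
# v = a*b / ((a + b)**2 * (a + b + 1)). Solving for a and b gives
#     a = mu * (mu*(1 - mu)/v - 1) = mu**2 * ((1 - mu)/v - 1/mu)
#     b = a * (1/mu - 1)           = (1 - mu) * (mu*(1 - mu)/v - 1)
# which is exactly what `alpha` and `b` compute.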
# Plot the beta distribution along with the normalized histogram
plt.hist(r['WinPct'],bins=75,normed=True)
x = np.linspace(0.2,0.8,num=100)
y = beta.pdf(x,alpha,b)
plt.plot(x,y,color='red',linewidth=3)
plt.savefig('beta.png',dpi=300)
#%%
# Update the prior and plot
alpha2 = alpha + 10
beta2 = b
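# Conjugacy note: with a Beta(alpha, b) prior on the win probability and a
# binomial likelihood, observing w wins and l losses yields a
# Beta(alpha + w, b + l) posterior. The update above therefore corresponds to
# observing 10 wins and 0 losses (a 10-0 start is assumed here).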
y2 = beta.pdf(x,alpha2,beta2)
plt.plot(x,y,color='red',label='Prior')
plt.plot(x,y2,color='green',label='Posterior')
plt.legend(loc='upper left')
plt.savefig('posterior.png',dpi=300)
# New mean?
mean2 = alpha2/(alpha2+beta2)
print mean2
print mean2*162
# Apply to rest of season only
print mean2*152+10
# New mode
mymode = (alpha2-1)/(alpha2+beta2-2)
print mymode*162 | mit |
flightgong/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
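# bdt_real above relies on AdaBoostClassifier's default algorithm="SAMME.R";
# only the discrete variant needs the algorithm argument spelled out.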
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/tests/test_png.py | 5 | 1351 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import glob
import os
import numpy as np
from matplotlib.testing.decorators import image_comparison
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import sys
on_win = (sys.platform == 'win32')
@image_comparison(baseline_images=['pngsuite'], extensions=['png'],
tol=0.01 if on_win else 0)
def test_pngsuite():
dirname = os.path.join(
os.path.dirname(__file__),
'baseline_images',
'pngsuite')
files = glob.glob(os.path.join(dirname, 'basn*.png'))
files.sort()
fig = plt.figure(figsize=(len(files), 2))
for i, fname in enumerate(files):
data = plt.imread(fname)
cmap = None # use default colormap
if data.ndim == 2:
# keep grayscale images gray
cmap = cm.gray
plt.imshow(data, extent=[i, i + 1, 0, 1], cmap=cmap)
plt.gca().patch.set_facecolor("#ddffff")
plt.gca().set_xlim(0, len(files))
def test_imread_png_uint16():
from matplotlib import _png
img = _png.read_png_int(os.path.join(os.path.dirname(__file__),
'baseline_images/test_png/uint16.png'))
assert (img.dtype == np.uint16)
assert np.sum(img.flatten()) == 134184960
| apache-2.0 |
ngoix/OCRF | sklearn/tests/test_cross_validation.py | 24 | 47465 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups' segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
demorest/rtpipe | rtpipe/reproduce.py | 1 | 13178 | import numpy as np
import rtpipe.RT as rt
import rtpipe.parseparams as pp
import rtpipe.parsecands as pc
import pickle, logging, os
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def plot_cand(candsfile, candloc=[], candnum=-1, threshold=0, savefile=True, returndata=False, outname='', **kwargs):
""" Reproduce detection of a single candidate for plotting or inspection.
candsfile can be merge or single-scan cands pkl file. Difference defined by presence of scan in d['featureind'].
candloc reproduces candidate at given location (scan, segment, integration, dmind, dtind, beamnum).
candnum selects one to reproduce from ordered list
threshold is min of sbs(SNR) used to filter candidates to select with candnum.
savefile/outname define if/how to save png of candidate
if returndata, (im, data) returned.
kwargs passed to rt.set_pipeline
"""
# get candidate info
loc, prop = pc.read_candidates(candsfile)
# define state dict and overload with user prefs
d0 = pickle.load(open(candsfile, 'r'))
for key in kwargs:
logger.info('Setting %s to %s' % (key, kwargs[key]))
d0[key] = kwargs[key]
d0['logfile'] = False # no need to save log
# feature columns
if 'snr2' in d0['features']:
snrcol = d0['features'].index('snr2')
elif 'snr1' in d0['features']:
snrcol = d0['features'].index('snr1')
if 'l2' in d0['features']:
lcol = d0['features'].index('l2')
elif 'l1' in d0['features']:
lcol = d0['features'].index('l1')
if 'm2' in d0['features']:
mcol = d0['features'].index('m2')
elif 'm1' in d0['features']:
mcol = d0['features'].index('m1')
try:
scancol = d0['featureind'].index('scan') # if merged pkl
except ValueError:
scancol = -1 # if single-scan pkl
segmentcol = d0['featureind'].index('segment')
intcol = d0['featureind'].index('int')
dtindcol = d0['featureind'].index('dtind')
dmindcol = d0['featureind'].index('dmind')
# sort and prep candidate list
snrs = prop[:, snrcol]
select = np.where(np.abs(snrs) > threshold)[0]
loc = loc[select]
prop = prop[select]
times = pc.int2mjd(d0, loc)
times = times - times[0]
# default case will print cand info
if (candnum < 0) and (not len(candloc)):
logger.info('Getting candidates...')
logger.info('candnum: loc, SNR, DM (pc/cm3), time (s; rel)')
for i in range(len(loc)):
logger.info("%d: %s, %.1f, %.1f, %.1f" % (i, str(loc[i]), prop[i, snrcol], np.array(d0['dmarr'])[loc[i,dmindcol]], times[i]))
else: # if candnum or candloc provided, try to reproduce
if (candnum >= 0) and not len(candloc):
logger.info('Reproducing and visualizing candidate %d at %s with properties %s.' % (candnum, loc[candnum], prop[candnum]))
dmarrorig = d0['dmarr']
dtarrorig = d0['dtarr']
if scancol >= 0: # here we have a merge pkl
scan = loc[candnum, scancol]
else: # a scan-based cands pkl
scan = d0['scan']
segment = loc[candnum, segmentcol]
candint = loc[candnum, intcol]
dmind = loc[candnum, dmindcol]
dtind = loc[candnum, dtindcol]
beamnum = 0
candloc = (scan, segment, candint, dmind, dtind, beamnum)
elif len(candloc) and (candnum < 0):
assert len(candloc) == 6, 'candloc should be length 6 ( scan, segment, candint, dmind, dtind, beamnum ).'
logger.info('Reproducing and visualizing candidate %d at %s' % (candnum, candloc))
dmarrorig = d0['dmarr']
dtarrorig = d0['dtarr']
scan, segment, candint, dmind, dtind, beamnum = candloc
else:
            raise Exception('Provide candnum or candloc, not both')
# if working locally, set workdir appropriately. Can also be used in queue system with full path given.
if not os.path.dirname(candsfile):
d0['workdir'] = os.getcwd()
else:
d0['workdir'] = os.path.dirname(candsfile)
filename = os.path.join(d0['workdir'], os.path.basename(d0['filename']))
# clean up d0 of superfluous keys
params = pp.Params() # will be used as input to rt.set_pipeline
for key in d0.keys():
if not hasattr(params, key) and 'memory_limit' not in key:
_ = d0.pop(key)
d0['npix'] = 0
d0['uvres'] = 0
d0['nsegments'] = 0
d0['logfile'] = False
# get cand data
d = rt.set_pipeline(filename, scan, **d0)
im, data = rt.pipeline_reproduce(d, candloc, product='imdata') # removed loc[candnum]
# optionally plot
if savefile:
loclabel = scan, segment, candint, dmind, dtind, beamnum
make_cand_plot(d, im, data, loclabel, outname=outname)
# optionally return data
if returndata:
return (im, data)
def refine_cand(candsfile, candloc=[], threshold=0):
""" Helper function to interact with merged cands file and refine analysis
candsfile is merged pkl file
candloc (scan, segment, candint, dmind, dtind, beamnum) is as above.
if no candloc, then it prints out cands above threshold.
"""
if not candloc:
plot_cand(candsfile, candloc=[], candnum=-1, threshold=threshold,
savefile=False, returndata=False)
else:
d = pickle.load(open(candsfile, 'r'))
cands = rt.pipeline_refine(d, candloc)
return cands
def make_cand_plot(d, im, data, loclabel, outname=''):
""" Builds candidate plot.
Expects phased, dedispersed data (cut out in time, dual-pol), image, and metadata
loclabel is used to label the plot with (scan, segment, candint, dmind, dtind, beamnum).
"""
# given d, im, data, make plot
logger.info('Plotting...')
logger.debug('(image, data) shape: (%s, %s)' % (str(im.shape), str(data.shape)))
assert len(loclabel) == 6, 'loclabel should have (scan, segment, candint, dmind, dtind, beamnum)'
scan, segment, candint, dmind, dtind, beamnum = loclabel
# calc source location
snrmin = im.min()/im.std()
snrmax = im.max()/im.std()
if snrmax > -1*snrmin:
l1, m1 = rt.calc_lm(d, im, minmax='max')
snrobs = snrmax
else:
l1, m1 = rt.calc_lm(d, im, minmax='min')
snrobs = snrmin
pt_ra, pt_dec = d['radec']
src_ra, src_dec = source_location(pt_ra, pt_dec, l1, m1)
logger.info('Peak (RA, Dec): %s, %s' % (src_ra, src_dec))
# build plot
fig = plt.Figure(figsize=(8.5,8))
ax = fig.add_subplot(221, axisbg='white')
# add annotating info
ax.text(0.1, 0.9, d['fileroot'], fontname='sans-serif', transform = ax.transAxes)
ax.text(0.1, 0.8, 'sc %d, seg %d, int %d, DM %.1f, dt %d' % (scan, segment, candint, d['dmarr'][dmind], d['dtarr'][dtind]), fontname='sans-serif', transform = ax.transAxes)
ax.text(0.1, 0.7, 'Peak: (' + str(np.round(l1, 3)) + ' ,' + str(np.round(m1, 3)) + '), SNR: ' + str(np.round(snrobs, 1)), fontname='sans-serif', transform = ax.transAxes)
# plot dynamic spectra
left, width = 0.6, 0.2
bottom, height = 0.2, 0.7
rect_dynsp = [left, bottom, width, height]
rect_lc = [left, bottom-0.1, width, 0.1]
rect_sp = [left+width, bottom, 0.1, height]
ax_dynsp = fig.add_axes(rect_dynsp)
ax_lc = fig.add_axes(rect_lc)
ax_sp = fig.add_axes(rect_sp)
spectra = np.swapaxes(data.real,0,1) # seems that latest pickle actually contains complex values in spectra...
dd = np.concatenate( (spectra[...,0], np.zeros_like(spectra[...,0]), spectra[...,1]), axis=1) # make array for display with white space between two pols
impl = ax_dynsp.imshow(dd, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap('Greys'))
ax_dynsp.text(0.5, 0.95, 'RR LL', horizontalalignment='center', verticalalignment='center', fontsize=16, color='w', transform = ax_dynsp.transAxes)
ax_dynsp.set_yticks(range(0,len(d['freq']),30))
ax_dynsp.set_yticklabels(d['freq'][::30])
ax_dynsp.set_ylabel('Freq (GHz)')
ax_dynsp.set_xlabel('Integration (rel)')
spectrum = spectra[:,len(spectra[0])/2].mean(axis=1) # assume pulse in middle bin. get stokes I spectrum. **this is wrong in a minority of cases.**
ax_sp.plot(spectrum, range(len(spectrum)), 'k.')
ax_sp.plot(np.zeros(len(spectrum)), range(len(spectrum)), 'k:')
ax_sp.set_ylim(0, len(spectrum))
ax_sp.set_yticklabels([])
xmin,xmax = ax_sp.get_xlim()
ax_sp.set_xticks(np.linspace(xmin,xmax,3).round(2))
ax_sp.set_xlabel('Flux (Jy)')
lc = dd.mean(axis=0)
lenlc = len(data) # old (stupid) way: lenlc = np.where(lc == 0)[0][0]
ax_lc.plot(range(0,lenlc)+range(2*lenlc,3*lenlc), list(lc)[:lenlc] + list(lc)[-lenlc:], 'k.')
ax_lc.plot(range(0,lenlc)+range(2*lenlc,3*lenlc), list(np.zeros(lenlc)) + list(np.zeros(lenlc)), 'k:')
ax_lc.set_xlabel('Integration')
ax_lc.set_ylabel('Flux (Jy)')
ax_lc.set_xticks([0,0.5*lenlc,lenlc,1.5*lenlc,2*lenlc,2.5*lenlc,3*lenlc])
ax_lc.set_xticklabels(['0',str(lenlc/2),str(lenlc),'','0',str(lenlc/2),str(lenlc)])
ymin,ymax = ax_lc.get_ylim()
ax_lc.set_yticks(np.linspace(ymin,ymax,3).round(2))
# image
ax = fig.add_subplot(223)
fov = np.degrees(1./d['uvres'])*60.
impl = ax.imshow(im.transpose(), aspect='equal', origin='upper',
interpolation='nearest', extent=[fov/2, -fov/2, -fov/2, fov/2],
cmap=plt.get_cmap('Greys'), vmin=0, vmax=0.5*im.max())
ax.set_xlabel('RA Offset (arcmin)')
ax.set_ylabel('Dec Offset (arcmin)')
if not outname:
outname = os.path.join(d['workdir'],
'cands_{}_sc{}-seg{}-i{}-dm{}-dt{}.png'.format(d['fileroot'], scan,
segment, candint, dmind, dtind))
try:
canvas = FigureCanvasAgg(fig)
canvas.print_figure(outname)
except ValueError:
logger.warn('Could not write figure to %s' % outname)
def convertloc(candsfile, candloc, memory_limit):
""" For given state and location that are too bulky, calculate new location given memory_limit. """
scan, segment, candint, dmind, dtind, beamnum = candloc
# set up state and find absolute integration of candidate
d0 = pickle.load(open(candsfile, 'r'))
filename = os.path.basename(d0['filename'])
readints0 = d0['readints']
nskip0 = (24*3600*(d0['segmenttimes'][segment, 0]
- d0['starttime_mjd'])
/ d0['inttime']).astype(int)
candint_abs = nskip0 + candint
logger.debug('readints0 {} nskip0 {}, candint_abs {}'.format(readints0, nskip0, candint_abs))
# clean up d0 and resubmit to set_pipeline
params = pp.Params()
for key in d0.keys():
if not hasattr(params, key):
_ = d0.pop(key)
d0['logfile'] = False
d0['npix'] = 0
d0['uvres'] = 0
d0['nsegments'] = 0
d0['memory_limit'] = memory_limit
d = rt.set_pipeline(os.path.basename(filename), scan, **d0)
# find best segment for new state
readints = d['readints']
nskips = [(24*3600*(d['segmenttimes'][segment, 0]
- d['starttime_mjd']) / d['inttime']).astype(int)
for segment in range(d['nsegments'])]
posind = [i for i in range(len(nskips)) if candint_abs - nskips[i] > 0]
segment_new = [seg for seg in posind if candint_abs - nskips[seg] == min([candint_abs - nskips[i] for i in posind])][0]
candint_new = candint_abs - nskips[segment_new]
logger.debug('nskips {}, segment_new {}'.format(nskips, segment_new))
return [scan, segment_new, candint_new, dmind, dtind, beamnum]
def source_location(pt_ra, pt_dec, l1, m1):
""" Takes phase center and src l,m in radians to get ra,dec of source.
Returns string ('hh mm ss', 'dd mm ss')
"""
import math
srcra = np.degrees(pt_ra + l1/math.cos(pt_dec))
srcdec = np.degrees(pt_dec + m1)
return deg2HMS(srcra, srcdec)
def deg2HMS(ra='', dec='', round=False):
""" quick and dirty coord conversion. googled to find bdnyc.org.
"""
RA, DEC, rs, ds = '', '', '', ''
if dec:
if str(dec)[0] == '-':
ds, dec = '-', abs(dec)
deg = int(dec)
decM = abs(int((dec-deg)*60))
if round:
decS = int((abs((dec-deg)*60)-decM)*60)
else:
decS = (abs((dec-deg)*60)-decM)*60
DEC = '{0}{1} {2} {3}'.format(ds, deg, decM, decS)
if ra:
if str(ra)[0] == '-':
rs, ra = '-', abs(ra)
raH = int(ra/15)
raM = int(((ra/15)-raH)*60)
if round:
raS = int(((((ra/15)-raH)*60)-raM)*60)
else:
raS = ((((ra/15)-raH)*60)-raM)*60
RA = '{0}{1} {2} {3}'.format(rs, raH, raM, raS)
if ra and dec:
return (RA, DEC)
else:
return RA or DEC
| bsd-3-clause |
pxzhang94/GAN | GAN/auxiliary_classifier_gan/ac_gan_pytorch.py | 1 | 3659 | import torch
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32
z_dim = 16
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
cnt = 0
lr = 1e-3
eps = 1e-8
G_ = torch.nn.Sequential(
torch.nn.Linear(z_dim + y_dim, h_dim),
torch.nn.PReLU(),
torch.nn.Linear(h_dim, X_dim),
torch.nn.Sigmoid()
)
def G(z, c):
inputs = torch.cat([z, c], 1)
return G_(inputs)
D_shared = torch.nn.Sequential(
torch.nn.Linear(X_dim, h_dim),
torch.nn.PReLU()
)
D_gan = torch.nn.Sequential(
torch.nn.Linear(h_dim, 1),
torch.nn.Sigmoid()
)
D_aux = torch.nn.Sequential(
torch.nn.Linear(h_dim, y_dim),
torch.nn.Softmax()
)
def D(X):
h = D_shared(X)
return D_gan(h), D_aux(h)
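# Note on the layout above (added commentary, not part of the original repo): the
# discriminator follows the AC-GAN pattern with a shared trunk (D_shared) and two
# heads -- D_gan scores real vs. fake, while D_aux predicts the class label -- so
# D(X) returns one real/fake probability and one class distribution per sample.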
nets = [G_, D_shared, D_gan, D_aux]
G_params = G_.parameters()
D_params = (list(D_shared.parameters()) + list(D_gan.parameters()) +
list(D_aux.parameters()))
def reset_grad():
for net in nets:
net.zero_grad()
G_solver = optim.Adam(G_params, lr=lr)
D_solver = optim.Adam(D_params, lr=lr)
for it in range(100000):
# Sample data
X, y = mnist.train.next_batch(mb_size)
X = Variable(torch.from_numpy(X))
# c is one-hot
c = Variable(torch.from_numpy(y.astype('float32')))
# y_true is not one-hot (requirement from nn.cross_entropy)
y_true = Variable(torch.from_numpy(y.argmax(axis=1).astype('int')))
# z noise
z = Variable(torch.randn(mb_size, z_dim))
""" Discriminator """
G_sample = G(z, c)
D_real, C_real = D(X)
D_fake, C_fake = D(G_sample)
# GAN's D loss
D_loss = torch.mean(torch.log(D_real + eps) + torch.log(1 - D_fake + eps))
# Cross entropy aux loss
C_loss = -nn.cross_entropy(C_real, y_true) - nn.cross_entropy(C_fake, y_true)
# Maximize
DC_loss = -(D_loss + C_loss)
DC_loss.backward()
D_solver.step()
reset_grad()
""" Generator """
G_sample = G(z, c)
D_fake, C_fake = D(G_sample)
_, C_real = D(X)
# GAN's G loss
G_loss = torch.mean(torch.log(D_fake + eps))
# Cross entropy aux loss
C_loss = -nn.cross_entropy(C_real, y_true) - nn.cross_entropy(C_fake, y_true)
# Maximize
GC_loss = -(G_loss + C_loss)
GC_loss.backward()
G_solver.step()
reset_grad()
# Print and plot every now and then
if it % 1000 == 0:
idx = np.random.randint(0, 10)
c = np.zeros([16, y_dim])
c[range(16), idx] = 1
c = Variable(torch.from_numpy(c.astype('float32')))
z = Variable(torch.randn(16, z_dim))
samples = G(z, c).data.numpy()
print('Iter-{}; D_loss: {:.4}; G_loss: {:.4}; Idx: {}'
.format(it, -D_loss.data[0], -G_loss.data[0], idx))
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'
.format(str(cnt).zfill(3)), bbox_inches='tight')
cnt += 1
plt.close(fig)
| apache-2.0 |
ksluckow/log2model | tools/animator_multi.py | 2 | 1550 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
import sys
array = []
totalargs = len(sys.argv)
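# Usage note (added; not part of the original script): each CSV passed on the
# command line is assumed to hold two comma-separated columns (x, y), e.g.
#   python animator_multi.py track1.csv track2.csv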
for i in xrange(1,totalargs):
print sys.argv[i]
array.append(np.genfromtxt(sys.argv[i], delimiter=','))
plotlays, plotcols = [2,5], ["black","red"]
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
xmin = 0
xmax = 0
ymin = 0
ymax = 0
for data in array:
currxmax = max(data[:,0])
currxmin = min(data[:,0])
if currxmax > xmax:
xmax = currxmax
if currxmin < xmin:
xmin = currxmin
currymax = max(data[:,1])
currymin = min(data[:,1])
if currymax > ymax:
ymax = currymax
if currymin < ymin:
ymin = currymin
xdelta = 1000
ydelta = 1000
ax = plt.axes(xlim=(xmin - xdelta,xmax + xdelta), ylim=(ymin - ydelta ,ymax + ydelta))
line, = ax.plot([], [], lw=2)
# initialization function: plot the background of each frame
lines = []
for i in array:
lobj = ax.plot([],[],lw=2)[0]
lines.append(lobj)
def init():
for line in lines:
line.set_data([],[])
return lines,
# animation function. This is called sequentially
def animate(i):
for s, data in enumerate(array):
x = data[0:i,0]
y = data[0:i,1]
lines[s].set_data(x, y)
return lines,
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=10000, interval=10, blit=False)
plt.grid()
plt.show()
| apache-2.0 |
SitiBanc/1061_NCTU_IOMDS | 1025/Homework 5/HW5_3.py | 1 | 7022 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 25 23:33:46 2017
@author: sitibanc
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# =============================================================================
# Read CSV
# =============================================================================
df = pd.read_csv('TXF20112015.csv', sep=',', header = None) # dataframe (time, close, open, high, low, volume)
TAIEX = df.values # ndarray
tradeday = list(set(TAIEX[:, 0] // 10000))                        # trading days (YYYYMMDD)
tradeday.sort()
# =============================================================================
# Strategy 3.0: buy one contract at the open, with an n-point stop-loss and an m-point take-profit, close out at the end of the day, m >= n
# =============================================================================
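# The grid search below (added explanatory comment): sweep the stop-loss n and the
# take-profit m in 10-point steps with m >= n, simulate the rule on every trading
# day, and keep the (n, m) pair with the highest cumulative profit.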
profit0 = np.zeros((len(tradeday), 1))
tmp_profit0 = np.zeros((len(tradeday), 1))
best0 = [0] * 3 # [n, m, profit]
count = 0
for n in range(10, 110, 10):
for m in range(n , 110, 10):
for i in range(len(tradeday)):
date = tradeday[i]
idx = np.nonzero(TAIEX[:, 0] // 10000 == date)[0]
idx.sort()
p1 = TAIEX[idx[0], 2]
            # set the stop-loss level
            idx2 = np.nonzero(TAIEX[idx, 4] <= p1 - n)[0]       # low falls below the stop-loss level
            # set the take-profit level
            idx3 = np.nonzero(TAIEX[idx, 3] >= p1 + m)[0]       # high breaks above the take-profit level
            if len(idx2) == 0 and len(idx3) == 0:               # neither stop-loss nor take-profit hit that day
                p2 = TAIEX[idx[-1], 1]                          # sell at the day's closing price
            elif len(idx3) == 0:                                # stop-loss hit but no take-profit
                p2 = TAIEX[idx[idx2[0]], 1]                     # sell at the close of the stop-loss bar
            elif len(idx2) == 0:                                # take-profit hit but no stop-loss
                p2 = TAIEX[idx[idx3[0]], 1]                     # sell at the close of the take-profit bar
            elif idx2[0] > idx3[0]:                             # take-profit level hit first
                p2 = TAIEX[idx[idx3[0]], 1]                     # sell at the close of the take-profit bar
            else:                                               # stop-loss level hit first
                p2 = TAIEX[idx[idx2[0]], 1]                     # sell at the close of the stop-loss bar
            tmp_profit0[i] = p2 - p1
        # keep the best (n, m)
if best0[2] < np.sum(tmp_profit0):
profit0 = np.hstack((profit0, tmp_profit0))
best0[0] = n
best0[1] = m
best0[2] = np.sum(tmp_profit0)
profit0 = profit0[:, -1]
print('Strategy 3.0: buy one contract at the open price,', best0[0], 'point stop-loss,', best0[1], 'point take-profit, close out at the closing price\nCumulative daily P&L line chart')
profit02 = np.cumsum(profit0)                     # cumulative daily P&L
plt.plot(profit02)                                # cumulative daily P&L line chart
plt.show()
print('Daily P&L distribution')
plt.hist(profit0, bins = 100)                     # histogram of daily P&L
plt.show()
# compute statistics
ans1 = len(profit0)                               # number of trades
ans2 = profit02[-1]                               # total P&L in points
ans3 = np.sum(profit0 > 0) / ans1 * 100           # win rate (%)
ans4 = np.mean(profit0[profit0 > 0])              # average gain on winning trades (points)
ans5 = np.mean(profit0[profit0 <= 0])             # average loss on losing trades (points)
print('Number of trades:', ans1, '\nTotal P&L (points):', ans2, '\nWin rate:', ans3, '%')
print('Average gain per winning trade (points):', ans4, '\nAverage loss per losing trade (points):', ans5, '\n')
# =============================================================================
# Strategy 3.1: sell one contract at the open, with an n-point stop-loss and an m-point take-profit, close out at the end of the day, m >= n
# =============================================================================
profit1 = np.zeros((len(tradeday),1))
tmp_profit1 = np.zeros((len(tradeday), 1))
best1 = [0] * 3
for n in range(10, 110, 10):
for m in range(n , 110, 10):
for i in range(len(tradeday)):
date = tradeday[i]
idx = np.nonzero(TAIEX[:, 0] // 10000 == date)[0]
idx.sort()
p1 = TAIEX[idx[0], 2]
            # set the stop-loss level
            idx2 = np.nonzero(TAIEX[idx, 3] >= p1 + n)[0]       # high breaks above the stop-loss level
            # set the take-profit level
            idx3 = np.nonzero(TAIEX[idx, 4] <= p1 - m)[0]       # low falls below the take-profit level
            if len(idx2) == 0 and len(idx3) == 0:               # neither stop-loss nor take-profit hit that day
                p2 = TAIEX[idx[-1], 1]                          # buy back at the day's closing price
            elif len(idx3) == 0:                                # stop-loss hit but no take-profit
                p2 = TAIEX[idx[idx2[0]], 1]                     # buy back at the close of the stop-loss bar
            elif len(idx2) == 0:                                # take-profit hit but no stop-loss
                p2 = TAIEX[idx[idx3[0]], 1]                     # buy back at the close of the take-profit bar
            elif idx2[0] > idx3[0]:                             # take-profit level hit first
                p2 = TAIEX[idx[idx3[0]], 1]                     # buy back at the close of the take-profit bar
            else:                                               # stop-loss level hit first
                p2 = TAIEX[idx[idx2[0]], 1]                     # buy back at the close of the stop-loss bar
            tmp_profit1[i] = p1 - p2
        # keep the best (n, m)
if best1[2] < np.sum(tmp_profit1):
best1[0] = n
best1[1] = m
best1[2] = np.sum(tmp_profit1)
profit1 = np.hstack((profit1, tmp_profit1))
profit1 = profit1[:, -1]
print('Strategy 3.1: sell one contract at the open price,', best1[0], 'point stop-loss,', best1[1], 'point take-profit, close out at the closing price\nCumulative daily P&L line chart')
profit12 = np.cumsum(profit1)                     # cumulative daily P&L
plt.plot(profit12)                                # cumulative daily P&L line chart
plt.show()
print('Daily P&L distribution')
plt.hist(profit1, bins = 100)                     # histogram of daily P&L
plt.show()
# compute statistics
ans1 = len(profit1)                               # number of trades
ans2 = profit12[-1]                               # total P&L in points
ans3 = np.sum(profit1 > 0) / ans1 * 100           # win rate (%)
ans4 = np.mean(profit1[profit1 > 0])              # average gain on winning trades (points)
ans5 = np.mean(profit1[profit1 <= 0])             # average loss on losing trades (points)
print('Number of trades:', ans1, '\nTotal P&L (points):', ans2, '\nWin rate:', ans3, '%')
print('Average gain per winning trade (points):', ans4, '\nAverage loss per losing trade (points):', ans5) | apache-2.0 |
spennihana/h2o-3 | h2o-docs/src/booklets/v2_2015/source/python/python_scikit_learn_pipeline.py | 4 | 4072 | In [41]: from h2o.transforms.preprocessing import H2OScaler
In [42]: from sklearn.pipeline import Pipeline
In [44]: # Turn off h2o progress bars
In [45]: h2o.__PROGRESS_BAR__=False
In [46]: h2o.no_progress()
In [47]: # build transformation pipeline using sklearn's Pipeline and H2O transforms
In [48]: pipeline = Pipeline([("standardize", H2OScaler()),
....: ("pca", H2OPrincipalComponentAnalysisEstimator(k=2)),
....: ("gbm", H2OGradientBoostingEstimator(distribution="multinomial"))])
In [49]: pipeline.fit(iris_df[:4],iris_df[4])
Out[49]: Model Details
=============
H2OPCA : Principal Component Analysis
Model Key: PCA_model_python_1446220160417_32
Importance of components:
pc1 pc2
---------------------- -------- ---------
Standard deviation 3.22082 0.34891
Proportion of Variance 0.984534 0.0115538
Cumulative Proportion 0.984534 0.996088
ModelMetricsPCA: pca
** Reported on train data. **
MSE: NaN
RMSE: NaN
Model Details
=============
H2OGradientBoostingEstimator : Gradient Boosting Machine
Model Key: GBM_model_python_1446220160417_34
Model Summary:
number_of_trees number_of_internal_trees model_size_in_bytes min_depth max_depth mean_depth min_leaves max_leaves mean_leaves
-- ----------------- ------------------------- --------------------- ----------- ----------- ------------ ------------ ------------ -------------
50 150 28170 1 5 4.84 2 13 9.97333
ModelMetricsMultinomial: gbm
** Reported on train data. **
MSE: 0.00162796447355
RMSE: 0.0403480417561
LogLoss: 0.0152718656454
Mean Per-Class Error: 0.0
Confusion Matrix: vertical: actual; across: predicted
Iris-setosa Iris-versicolor Iris-virginica Error Rate
------------- ----------------- ---------------- ------- -------
50 0 0 0 0 / 50
0 50 0 0 0 / 50
0 0 50 0 0 / 50
50 50 50 0 0 / 150
Top-3 Hit Ratios:
k hit_ratio
--- -----------
1 1
2 1
3 1
Scoring History:
timestamp duration number_of_trees training_rmse training_logloss training_classification_error
--- ------------------- ---------- ----------------- ---------------- ------------------ -------------------------------
2016-08-25 13:50:21 0.006 sec 0.0 0.666666666667 1.09861228867 0.66
2016-08-25 13:50:21 0.077 sec 1.0 0.603019288754 0.924249463924 0.04
2016-08-25 13:50:21 0.096 sec 2.0 0.545137025745 0.788619346614 0.04
2016-08-25 13:50:21 0.110 sec 3.0 0.492902188607 0.679995476522 0.04
2016-08-25 13:50:21 0.123 sec 4.0 0.446151758168 0.591313596193 0.04
--- --- --- --- --- --- ---
2016-08-25 13:50:21 0.419 sec 46.0 0.0489303232171 0.0192767805328 0.0
2016-08-25 13:50:21 0.424 sec 47.0 0.0462779490149 0.0180720396825 0.0
2016-08-25 13:50:21 0.429 sec 48.0 0.0444689238255 0.0171428314531 0.0
2016-08-25 13:50:21 0.434 sec 49.0 0.0423442541538 0.0161938230172 0.0
2016-08-25 13:50:21 0.438 sec 50.0 0.0403480417561 0.0152718656454 0.0
Variable Importances:
variable relative_importance scaled_importance percentage
---------- --------------------- ------------------- ------------
PC1 448.958 1 0.982184
PC2 8.1438 0.0181393 0.0178162
Pipeline(steps=[('standardize', <h2o.transforms.preprocessing.H2OScaler object at 0x1088c6a50>), ('pca', ), ('gbm', )]) | apache-2.0 |
AlexRobson/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
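# Illustrative note (added; not part of the original sklearn source): for
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8] with the default
# pos_label, _binary_clf_curve returns fps = [0, 1, 1, 2], tps = [1, 1, 2, 2]
# and thresholds = [0.8, 0.4, 0.35, 0.1]; roc_curve below normalizes these into
# fpr = [0, 0.5, 0.5, 1] and tpr = [0.5, 0.5, 1, 1], matching its docstring.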
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
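        # (Added note) Worked example matching the docstring: for
        # y_true = [[1, 0, 0], [0, 0, 1]] and y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]],
        # the relevant label of the first sample sits at rank 2 with one relevant
        # label at or above it (1/2), and the relevant label of the second sample
        # sits at rank 3 (1/3), so the score is (1/2 + 1/3) / 2 = 5/12 ~ 0.416.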
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
        # If the scores are ordered, the number of incorrectly ordered pairs
        # can be counted in linear time by cumulatively counting how many
        # false labels of a given score lie above the accumulated true labels
        # with lower scores.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
| bsd-3-clause |
Unidata/pyCWT | cwt.py | 1 | 26078 | import numpy as np
from scipy.fftpack import fft, ifft, fftshift
__all__ = ['cwt', 'ccwt', 'icwt', 'SDG', 'Morlet']
class MotherWavelet(object):
"""Class for MotherWavelets.
Contains methods related to mother wavelets. Also used to ensure that new
mother wavelet objects contain the minimum requirements to be used in the
cwt related functions.
"""
@staticmethod
def get_coefs(self):
"""Raise error if method for calculating mother wavelet coefficients is
missing!
"""
raise NotImplementedError('get_coefs needs to be implemented for the mother wavelet')
@staticmethod
def get_coi_coef(sampf):
"""Raise error if Cone of Influence coefficient is not set in
subclass wavelet. To follow the convention in the literature, please define your
COI coef as a function of period, not scale - this will ensure
compatibility with the scalogram method.
"""
raise NotImplementedError('coi_coef needs to be implemented in subclass wavelet')
#add methods for computing cone of influence and mask
def get_coi(self):
"""Compute cone of influence."""
y1 = self.coi_coef * np.arange(0, self.len_signal / 2)
y2 = -self.coi_coef * np.arange(0, self.len_signal / 2) + y1[-1]
coi = np.r_[y1, y2]
self.coi = coi
return coi
def get_mask(self):
"""Get mask for cone of influence.
Sets self.mask as an array of bools for use in np.ma.array('', mask=mask)
"""
mask = np.ones(self.coefs.shape)
masks = self.coi_coef * self.scales
for s in range(0, len(self.scales)):
if (s != 0) and (int(np.ceil(masks[s])) < mask.shape[1]):
                mask[s, int(np.ceil(masks[s])):-int(np.ceil(masks[s]))] = 0
self.mask = mask.astype(bool)
return self.mask
class SDG(MotherWavelet):
"""Class for the SDG MotherWavelet (a subclass of MotherWavelet).
SDG(self, len_signal = None, pad_to = None, scales = None, sampf = 1,
normalize = True, fc = 'bandpass')
Parameters
----------
len_signal : int
Length of time series to be decomposed.
pad_to : int
Pad time series to a total length `pad_to` using zero padding (note,
the signal will be zero padded automatically during continuous wavelet
transform if pad_to is set). This is used in the fft function when
performing the convolution of the wavelet and mother wavelet in Fourier
space.
scales : array
Array of scales used to initialize the mother wavelet.
sampf : float
Sample frequency of the time series to be decomposed.
normalize : bool
If True, the normalized version of the mother wavelet will be used (i.e.
the mother wavelet will have unit energy).
fc : string
Characteristic frequency - use the 'bandpass' or 'center' frequency of
the Fourier spectrum of the mother wavelet to relate scale to period
(default is 'bandpass').
Returns
-------
Returns an instance of the MotherWavelet class which is used in the cwt and
icwt functions.
Examples
--------
Create instance of SDG mother wavelet, normalized, using 10 scales and the
center frequency of the Fourier transform as the characteristic frequency.
Then, perform the continuous wavelet transform and plot the scalogram.
# x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
# data = numpy.sin(x**2)
# scales = numpy.arange(10)
#
# mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10),normalize = True, fc = 'center')
    # wave_coefs = cwt(data, mother_wavelet)
# wave_coefs.scalogram()
Notes
-----
None
References
----------
Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor
and Francis Group, New York/London. 353 pp.
"""
def __init__(self,len_signal=None,pad_to=None,scales=None,sampf=1,normalize=True, fc = 'bandpass'):
"""Initilize SDG mother wavelet"""
        self.name = 'second derivative of a Gaussian (Mexican hat)'
self.sampf = sampf
self.scales = scales
self.len_signal = len_signal
self.normalize = normalize
#set total length of wavelet to account for zero padding
if pad_to is None:
self.len_wavelet = len_signal
else:
self.len_wavelet = pad_to
#set admissibility constant
if normalize:
self.cg = 4 * np.sqrt(np.pi) / 3.
else:
self.cg = np.pi
#define characteristic frequency
        if fc == 'bandpass':
            self.fc = np.sqrt(5./2.) * self.sampf/(2 * np.pi)
        elif fc == 'center':
self.fc = np.sqrt(2.) * self.sampf / (2 * np.pi)
else:
            raise ValueError("fc = %s not defined" % (fc,))
# coi_coef defined under the assumption that period is used, not scale
self.coi_coef = 2 * np.pi * np.sqrt(2. / 5.) * self.fc # Torrence and
# Compo 1998
# compute coefficients for the dilated mother wavelet
self.coefs = self.get_coefs()
def get_coefs(self):
"""Calculate the coefficients for the SDG mother wavelet"""
# Create array containing values used to evaluate the wavelet function
xi=np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.)
# find mother wavelet coefficients at each scale
xsd = -xi * xi / (self.scales[:,np.newaxis] * self.scales[:,np.newaxis])
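        # (Added note) With xsd = -t**2 / a**2, the lines below evaluate the
        # Mexican-hat profile psi(t/a) = c * (1 + xsd) * exp(xsd / 2)
        #                             = c * (1 - t**2/a**2) * exp(-t**2 / (2*a**2)),
        # where c = 2 / (sqrt(3) * pi**0.25) for the unit-energy (normalized) form.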
if self.normalize is True:
c=2. / (np.sqrt(3) * np.power(np.pi, 0.25))
else:
c=1.
mw = c * (1. + xsd) * np.exp(xsd / 2.)
self.coefs = mw
return mw
class Morlet(MotherWavelet):
"""Class for the Morlet MotherWavelet (a subclass of MotherWavelet).
Morlet(self, len_signal = None, pad_to = None, scales = None,
sampf = 1, f0 = 0.849)
Parameters
----------
len_signal : int
Length of time series to be decomposed.
pad_to : int
Pad time series to a total length `pad_to` using zero padding (note,
the signal will be zero padded automatically during continuous wavelet
transform if pad_to is set). This is used in the fft function when
performing the convolution of the wavelet and mother wavelet in Fourier
space.
scales : array
Array of scales used to initialize the mother wavelet.
sampf : float
Sample frequency of the time series to be decomposed.
f0 : float
Central frequency of the Morlet mother wavelet. The Fourier spectrum of
the Morlet wavelet appears as a Gaussian centered on f0. f0 defaults
to a value of 0.849 (the angular frequency would be ~5.336).
Returns
-------
Returns an instance of the MotherWavelet class which is used in the cwt
and icwt functions.
Examples
--------
Create instance of Morlet mother wavelet using 10 scales, perform the
continuous wavelet transform, and plot the resulting scalogram.
# x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
# data = numpy.sin(x**2)
# scales = numpy.arange(10)
#
# mother_wavelet = Morlet(len_signal=len(data), scales = np.arange(10))
    # wave_coefs = cwt(data, mother_wavelet)
# wave_coefs.scalogram()
Notes
-----
* Morlet wavelet is defined as having unit energy, so the `normalize` flag
will always be set to True.
    * The Morlet wavelet will always use f0 as its characteristic frequency, so
fc is set as f0.
References
----------
Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor
and Francis Group, New York/London. 353 pp.
"""
def __init__(self, len_signal=None, pad_to=None, scales=None, sampf=1,
normalize=True, f0=0.849):
"""Initilize Morlet mother wavelet."""
from scipy.integrate import trapz
from scipy.integrate import quad, Inf
self.sampf = sampf
self.scales = scales
self.len_signal = len_signal
self.normalize = True
self.name = 'Morlet'
# set total length of wavelet to account for zero padding
if pad_to is None:
self.len_wavelet = len_signal
else:
self.len_wavelet = pad_to
# define characteristic frequency
self.fc = f0
# Cone of influence coefficient
self.coi_coef = 2. * self.sampf / (self.fc + np.sqrt(2. + self.fc**2) *
np.sqrt(2)); #Torrence and Compo 1998 (in code)
# set admissibility constant
# based on the simplified Morlet wavelet energy spectrum
# in Addison (2002), eqn (2.39) - should be ok for f0 >0.84
# FIXED using quad 04/01/2011
#f = np.arange(0.001, 50, 0.001)
#y = 2. * np.sqrt(np.pi) * np.exp(-np.power((2. * np.pi * f -
# 2. * np.pi * self.fc), 2))
#self.cg = trapz(y[1:] / f[1:]) * (f[1]-f[0])
self.cg = quad(lambda x : 2. * np.sqrt(np.pi) * np.exp(-np.power((2. *
np.pi * x - 2. * np.pi * f0), 2)), -Inf, Inf)[0]
# compute coefficients for the dilated mother wavelet
self.coefs = self.get_coefs()
def get_coefs(self):
"""Calculate the coefficients for the Morlet mother wavelet."""
# Create array containing values used to evaluate the wavelet function
xi=np.arange(-self.len_wavelet / 2., self.len_wavelet / 2.)
# find mother wavelet coefficients at each scale
xsd = xi / (self.scales[:,np.newaxis])
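        # (Added note) The expression below is the standard (simplified) Morlet
        # wavelet psi(t/a) = pi**-0.25 * (exp(2j*pi*fc*t/a) - exp(-(2*pi*fc)**2 / 2))
        #                    * exp(-(t/a)**2 / 2),
        # where the constant second term is the small correction that gives the
        # wavelet zero mean.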
mw = np.power(np.pi,-0.25) * \
(np.exp(np.complex(1j) * 2. * np.pi * self.fc * xsd) - \
np.exp(-np.power((2. * np.pi * self.fc), 2) / 2.)) * \
np.exp(-np.power(xsd, 2) / 2.)
self.coefs = mw
return mw
class Wavelet(object):
"""Class for Wavelet object.
The Wavelet object holds the wavelet coefficients as well as information on
how they were obtained.
"""
def __init__(self, wt, wavelet, weighting_function, signal_dtype, deep_copy=True):
"""Initialization of Wavelet object.
Parameters
----------
wt : array
Array of wavelet coefficients.
wavelet : object
Mother wavelet object used in the creation of `wt`.
weighting_function : function
Function used in the creation of `wt`.
signal_dtype : dtype
dtype of signal used in the creation of `wt`.
deep_copy : bool
If true (default), the mother wavelet object used in the creation of
the wavelet object will be fully copied and accessible through
wavelet.motherwavelet; if false, wavelet.motherwavelet will be a
reference to the motherwavelet object (that is, if you change the
mother wavelet object, you will see the changes when accessing the
mother wavelet through the wavelet object - this is NOT good for
tracking how the wavelet transform was computed, but setting
deep_copy to False will save memory).
Returns
-------
Returns an instance of the Wavelet class.
"""
from copy import deepcopy
self.coefs = wt[:,0:wavelet.len_signal]
if wavelet.len_signal != wavelet.len_wavelet:
self._pad_coefs = wt[:,wavelet.len_signal:]
else:
self._pad_coefs = None
if deep_copy:
self.motherwavelet = deepcopy(wavelet)
else:
self.motherwavelet = wavelet
self.weighting_function = weighting_function
self._signal_dtype = signal_dtype
def get_gws(self):
"""Calculate Global Wavelet Spectrum.
References
----------
Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
gws = self.get_wavelet_var()
return gws
def get_wes(self):
"""Calculate Wavelet Energy Spectrum.
References
----------
Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
from scipy.integrate import trapz
coef = 1. / (self.motherwavelet.fc * self.motherwavelet.cg)
wes = coef * trapz(np.power(np.abs(self.coefs), 2), axis = 1);
return wes
def get_wps(self):
"""Calculate Wavelet Power Spectrum.
References
----------
Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
wps = (1./ self.motherwavelet.len_signal) * self.get_wes()
return wps
def get_wavelet_var(self):
"""Calculate Wavelet Variance (a.k.a. the Global Wavelet Spectrum of
Torrence and Compo (1998)).
References
----------
Torrence, C., and G. P. Compo, 1998: A Practical Guide to Wavelet
Analysis. Bulletin of the American Meteorological Society, 79, 1,
pp. 61-78.
"""
coef = self.motherwavelet.cg * self.motherwavelet.fc
wvar = (coef / self.motherwavelet.len_signal) * self.get_wes()
return wvar
def scalogram(self, show_coi=False, show_wps=False, ts=None, time=None,
use_period=True, ylog_base=None, xlog_base=None,
origin='top', figname=None):
""" Scalogram plotting routine.
Creates a simple scalogram, with optional wavelet power spectrum and
time series plots of the transformed signal.
Parameters
----------
show_coi : bool
Set to True to see Cone of Influence
show_wps : bool
Set to True to see the Wavelet Power Spectrum
ts : array
1D array containing time series data used in wavelet transform. If set,
time series will be plotted.
time : array of datetime objects
1D array containing time information
use_period : bool
Set to True to see figures use period instead of scale
ylog_base : float
If a log scale is desired, set `ylog_base` as float. (for log 10, set
ylog_base = 10)
xlog_base : float
If a log scale is desired, set `xlog_base` as float. (for log 10, set
xlog_base = 10) *note that this option is only valid for the wavelet power
spectrum figure.
origin : 'top' or 'bottom'
Set origin of scale axis to top or bottom of figure
Returns
-------
None
Examples
--------
Create instance of SDG mother wavelet, normalized, using 10 scales and the
center frequency of the Fourier transform as the characteristic frequency.
Then, perform the continuous wavelet transform and plot the scalogram.
# x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
# data = numpy.sin(x**2)
# scales = numpy.arange(10)
#
# mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10), normalize = True, fc = 'center')
        # wave_coefs = cwt(data, mother_wavelet)
# wave_coefs.scalogram(origin = 'bottom')
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pylab import poly_between
if ts is not None:
show_ts = True
else:
show_ts = False
if not show_wps and not show_ts:
# only show scalogram
figrow = 1
figcol = 1
elif show_wps and not show_ts:
# show scalogram and wps
figrow = 1
figcol = 4
elif not show_wps and show_ts:
# show scalogram and ts
figrow = 2
figcol = 1
else:
# show scalogram, wps, and ts
figrow = 2
figcol = 4
if time is None:
x = np.arange(self.motherwavelet.len_signal)
else:
x = time
if use_period:
y = self.motherwavelet.scales / self.motherwavelet.fc
else:
y = self.motherwavelet.scales
fig = plt.figure(figsize=(16, 12), dpi=160)
ax1 = fig.add_subplot(figrow, figcol, 1)
# if show wps, give 3/4 space to scalogram, 1/4 to wps
if show_wps:
# create temp axis at 3 or 4 col of row 1
axt = fig.add_subplot(figrow, figcol, 3)
# get location of axtmp and ax1
axt_pos = axt.get_position()
ax1_pos = ax1.get_position()
axt_points = axt_pos.get_points()
ax1_points = ax1_pos.get_points()
# set axt_pos left bound to that of ax1
axt_points[0][0] = ax1_points[0][0]
ax1.set_position(axt_pos)
fig.delaxes(axt)
if show_coi:
# coi_coef is defined using the assumption that you are using
# period, not scale, in plotting - this handles that behavior
if use_period:
coi = self.motherwavelet.get_coi() / self.motherwavelet.fc / self.motherwavelet.sampf
else:
coi = self.motherwavelet.get_coi()
coi[coi == 0] = y.min() - 0.1 * y.min()
xs, ys = poly_between(np.arange(0, len(coi)), np.max(y), coi)
ax1.fill(xs, ys, 'k', alpha=0.4, zorder = 2)
contf=ax1.contourf(x,y,np.abs(self.coefs)**2)
fig.colorbar(contf, ax=ax1, orientation='vertical', format='%2.1f')
if ylog_base is not None:
ax1.axes.set_yscale('log', basey=ylog_base)
        if origin == 'top':
            ax1.set_ylim((y[-1], y[0]))
        elif origin == 'bottom':
ax1.set_ylim((y[0], y[-1]))
else:
            raise ValueError('`origin` must be set to "top" or "bottom"')
ax1.set_xlim((x[0], x[-1]))
ax1.set_title('scalogram')
ax1.set_ylabel('time')
if use_period:
ax1.set_ylabel('period')
ax1.set_xlabel('time')
else:
ax1.set_ylabel('scales')
if time is not None:
ax1.set_xlabel('time')
else:
ax1.set_xlabel('sample')
if show_wps:
ax2 = fig.add_subplot(figrow,figcol,4,sharey=ax1)
if use_period:
ax2.plot(self.get_wps(), y, 'k')
else:
ax2.plot(self.motherwavelet.fc * self.get_wps(), y, 'k')
if ylog_base is not None:
ax2.axes.set_yscale('log', basey=ylog_base)
if xlog_base is not None:
ax2.axes.set_xscale('log', basey=xlog_base)
            if origin == 'top':
ax2.set_ylim((y[-1], y[0]))
else:
ax2.set_ylim((y[0], y[-1]))
if use_period:
ax2.set_ylabel('period')
else:
ax2.set_ylabel('scales')
ax2.grid()
ax2.set_title('wavelet power spectrum')
if show_ts:
ax3 = fig.add_subplot(figrow, 2, 3, sharex=ax1)
ax3.plot(x, ts)
ax3.set_xlim((x[0], x[-1]))
ax3.legend(['time series'])
ax3.grid()
# align time series fig with scalogram fig
t = ax3.get_position()
ax3pos=t.get_points()
ax3pos[1][0]=ax1.get_position().get_points()[1][0]
t.set_points(ax3pos)
ax3.set_position(t)
if (time is not None) or use_period:
ax3.set_xlabel('time')
else:
ax3.set_xlabel('sample')
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close('all')
def cwt(x, wavelet, weighting_function=lambda x: x**(-0.5), deep_copy=True):
"""Computes the continuous wavelet transform of x using the mother wavelet
`wavelet`.
This function computes the continuous wavelet transform of x using an
instance a mother wavelet object.
The cwt is defined as:
T(a,b) = w(a) integral(-inf,inf)(x(t) * psi*{(t-b)/a} dt
which is a convolution. In this algorithm, the convolution in the time
domain is implemented as a multiplication in the Fourier domain.
Parameters
----------
x : 1D array
Time series to be transformed by the cwt
wavelet : Instance of the MotherWavelet class
Instance of the MotherWavelet class for a particular wavelet family
weighting_function: Function used to weight
Typically w(a) = a^(-0.5) is chosen as it ensures that the
wavelets at every scale have the same energy.
deep_copy : bool
If true (default), the mother wavelet object used in the creation of
the wavelet object will be fully copied and accessible through
wavelet.motherwavelet; if false, wavelet.motherwavelet will be a
reference to the motherwavelet object (that is, if you change the
mother wavelet object, you will see the changes when accessing the
mother wavelet through the wavelet object - this is NOT good for
tracking how the wavelet transform was computed, but setting
deep_copy to False will save memory).
Returns
-------
Returns an instance of the Wavelet class. The coefficients of the transform
can be obtain by the coefs() method (i.e. wavelet.coefs() )
Examples
--------
Create instance of SDG mother wavelet, normalized, using 10 scales and the
center frequency of the Fourier transform as the characteristic frequency.
Then, perform the continuous wavelet transform and plot the scalogram.
# x = numpy.arange(0,2*numpy.pi,numpy.pi/8.)
# data = numpy.sin(x**2)
# scales = numpy.arange(10)
#
# mother_wavelet = SDG(len_signal = len(data), scales = np.arange(10), normalize = True, fc = 'center')
    # wave_coefs = cwt(data, mother_wavelet)
# wave_coefs.scalogram()
References
----------
Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor
and Francis Group, New York/London. 353 pp.
"""
signal_dtype = x.dtype
if len(x) < wavelet.len_wavelet:
n = len(x)
x = np.resize(x, (wavelet.len_wavelet,))
x[n:] = 0
# Transform the signal and mother wavelet into the Fourier domain
xf=fft(x)
mwf=fft(wavelet.coefs.conj(), axis=1)
# Convolve (multiply in Fourier space)
wt_tmp=ifft(mwf*xf[np.newaxis,:], axis=1)
# shift output from ifft and multiply by weighting function
wt = fftshift(wt_tmp,axes=[1]) * weighting_function(wavelet.scales[:, np.newaxis])
# if mother wavelet and signal are real, only keep real part of transform
wt=wt.astype(np.lib.common_type(wavelet.coefs, x))
return Wavelet(wt,wavelet,weighting_function,signal_dtype,deep_copy)
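# Minimal usage sketch (added illustration; it mirrors the commented example in
# the docstring above but starts the scales at 1 to avoid a zero division in the
# mother wavelet coefficients):
#   data = np.sin(np.arange(0, 2 * np.pi, np.pi / 64) ** 2)
#   mother = SDG(len_signal=len(data), scales=np.arange(1, 11), normalize=True, fc='center')
#   wave_coefs = cwt(data, mother)
#   wave_coefs.scalogram()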
def ccwt(x1, x2, wavelet):
"""Compute the continuous cross-wavelet transform of 'x1' and 'x2' using the
mother wavelet 'wavelet', which is an instance of the MotherWavelet class.
Parameters
----------
x1,x2 : 1D array
Time series used to compute cross-wavelet transform
wavelet : Instance of the MotherWavelet class
Instance of the MotherWavelet class for a particular wavelet family
Returns
-------
Returns an instance of the Wavelet class.
"""
xwt = cwt(x1,wavelet) * np.conjugate(cwt(x2, wavelet))
return xwt
def icwt(wavelet):
"""Compute the inverse continuous wavelet transform.
Parameters
----------
wavelet : Instance of the MotherWavelet class
instance of the MotherWavelet class for a particular wavelet family
Examples
--------
Use the Morlet mother wavelet to perform wavelet transform on 'data', then
use icwt to compute the inverse wavelet transform to come up with an estimate
of data ('data2'). Note that data2 is not exactly equal data.
# import matplotlib.pyplot as plt
# from scipy.signal import SDG, Morlet, cwt, icwt, fft, ifft
# import numpy as np
#
# x = np.arange(0,2*np.pi,np.pi/64)
# data = np.sin(8*x)
# scales=np.arange(0.5,17)
#
# mother_wavelet = Morlet(len_signal = len(data), scales = scales)
# wave_coefs=cwt(data, mother_wavelet)
# data2 = icwt(wave_coefs)
#
# plt.plot(data)
# plt.plot(data2)
# plt.show()
References
----------
Addison, P. S., 2002: The Illustrated Wavelet Transform Handbook. Taylor
and Francis Group, New York/London. 353 pp.
"""
from scipy.integrate import trapz
# if original wavelet was created using padding, make sure to include
# information that is missing after truncation (see self.coefs under __init__
# in class Wavelet.
if wavelet.motherwavelet.len_signal != wavelet.motherwavelet.len_wavelet:
full_wc = np.c_[wavelet.coefs,wavelet._pad_coefs]
else:
full_wc = wavelet.coefs
# get wavelet coefficients and take fft
wcf = fft(full_wc,axis=1)
# get mother wavelet coefficients and take fft
mwf = fft(wavelet.motherwavelet.coefs,axis=1)
# perform inverse continuous wavelet transform and make sure the result is the same type
# (real or complex) as the original data used in the transform
x = (1. / wavelet.motherwavelet.cg) * trapz(
fftshift(ifft(wcf * mwf,axis=1),axes=[1]) /
(wavelet.motherwavelet.scales[:,np.newaxis]**2),
dx = 1. / wavelet.motherwavelet.sampf, axis=0)
return x[0:wavelet.motherwavelet.len_signal].astype(wavelet._signal_dtype) | bsd-3-clause |
IshankGulati/scikit-learn | examples/applications/face_recognition.py | 44 | 5706 | """
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset:
================== ============ ======= ========== =======
precision recall f1-score support
================== ============ ======= ========== =======
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
================== ============ ======= ========== =======
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized',
whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significant eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
| bsd-3-clause |
wzmao/mbio | mbio/Application/plotting.py | 1 | 1051 | # -*- coding: utf-8 -*-
"""This module contains some setting functions.
"""
__author__ = 'Wenzhi Mao'
__all__ = ['setAxesEqual']
def setAxesEqual(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
from numpy import mean
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = x_limits[1] - x_limits[0]
x_mean = mean(x_limits)
y_range = y_limits[1] - y_limits[0]
y_mean = mean(y_limits)
z_range = z_limits[1] - z_limits[0]
z_mean = mean(z_limits)
plot_radius = 0.5 * max([x_range, y_range, z_range])
ax.set_xlim3d([x_mean - plot_radius, x_mean + plot_radius])
ax.set_ylim3d([y_mean - plot_radius, y_mean + plot_radius])
ax.set_zlim3d([z_mean - plot_radius, z_mean + plot_radius])
return None
| mit |
mlyundin/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
AtlasMaxima/unearthedSydney | mesh.py | 1 | 4385 | from scipy.spatial import cKDTree
import numpy as np
import csv
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import ConvexHull, Delaunay
import folium
from folium import plugins
from folium.plugins import HeatMap
def read_data(input_filename):
"""Read in a file, returning an array and a kdtree"""
with open(input_filename) as input_file:
input_reader = csv.reader(input_file)
data_points = np.array([tuple(map(float,line)) for line in input_reader])
kdtree = cKDTree(data_points[:,[0,1]])
return (data_points, kdtree)
def create_kdtree(data_points):
return (data_points, cKDTree(data_points[:,[0,1]]))
def bounds(kdtree):
"""Return the bounds of a kdtree"""
return (tuple(kdtree.mins), tuple(kdtree.maxes))
def query(data_points, kdtree, queries, resolution):
"""Given the data, a kdtree, a list of points and
a radius to search in, return a value for each point"""
return [point_value(data_points[x][:,[2]],data_points,queries[n],kdtree)
for n,x in enumerate(kdtree.query_ball_point(queries,r=resolution))]
def point_value(arr,data_points,x,kdtree):
"""Get the mean value of a set of points returned from
a ball query on the kdtree. If there are no points in the radius,
just return the closest"""
if len(arr) == 0: return data_points[kdtree.query(x)[1]][2]
return np.mean(arr)
def grid(data_points, kdtree, xbounds, ybounds, num_xsteps=300, num_ysteps=300):
"""Return a grid of z value between the bounds given. It'll work for
things that aren't square, but please try and make it a square"""
(minx, maxx) = xbounds
(miny, maxy) = ybounds
res = np.zeros((num_xsteps,num_ysteps))
xsteps = np.linspace(minx,maxx,num_xsteps)
ysteps = np.linspace(miny,maxy,num_ysteps)
for (i,x) in enumerate(xsteps):
for (j,y) in enumerate(ysteps):
res[(i,j)] = query(data_points, kdtree, [(x,y)], max((maxx-minx)/num_xsteps,(maxy-miny)/num_ysteps))[0]
return (xsteps,ysteps,res)
def draw(xs,ys,zs):
"""Draw a mesh from a grid of points
:param xs: A 1-D array of x-coordinates
:param ys: A 1-D array of y-cooridinates
:param zs: A 2-D array of heights"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs,ys = np.meshgrid(xs,ys)
ax.plot_surface(xs,ys,zs)
plt.show()
def downscale(data_points, kdtree, xbounds, ybounds, num_xsteps=300, num_ysteps=300):
"""Make a new dataset at a lower resolution. This won't interpolate to create
new points, so it can't be used to increase resolution
:param data_points: An array of x,y,z values
:param kdtree: The matching tree
"""
(xs,ys,data) = grid(data_points, kdtree, xbounds, ybounds, num_xsteps, num_ysteps)
(xs,ys) = np.meshgrid(xs,ys)
xs = xs.flatten()
ys = ys.flatten()
data = data.flatten()
new_data = np.array([xs, ys, data]).T  # stack the columns into an (N, 3) x,y,z array
return (new_data, cKDTree(new_data[:,[0,1]]))
def merge(old_data_points, new_data_points):
"""Merge two sets of data. The first one must be larger
:param old_data_points: The "base" dataset
:param new_data_points: The new data
:returns: A pair containing an array for all the data, and the corresponding kd-tree"""
hull = Delaunay(new_data_points[:, [0, 1]])  # x,y columns, matching the find_simplex query below
smaller = np.array(old_data_points[hull.find_simplex(old_data_points[:,[0,1]]) < 0])
merged_data = np.concatenate((new_data_points, smaller))
return create_kdtree(merged_data)
def read_tiered_data(filenames):
"""Given a list of files partially ordered by resolution
(low to high) return a kdtree and dataset
:param filenames: A list of filenames
:returns: A pair containing an array for all the data, and the corresponding kd-tree
"""
initial = None
kdtree = None
for f in filenames:
if kdtree is None:
initial,kdtree = read_data(f)
else:
(d,t) = read_data(f)
(initial,kdtree) = merge(initial,d)
return (initial, kdtree)
def draw_heatmap(x,y,z):
"""Draw a heatmap using folium. Not really that useful"""
x,y = np.meshgrid(x,y)
terrain_map = folium.Map(location=[x[0,0], y[0,0]], tiles='Stamen Terrain', zoom_start=12)
HeatMap(zip(x.flatten(),y.flatten(),z.flatten()), radius=10).add_to(terrain_map)
terrain_map.save('map.html')
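# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): end-to-end usage of the
# helpers above.  'survey.csv' is a hypothetical headerless x,y,z point file
# and the 100x100 grid resolution is arbitrary; the flow is: load the points,
# sample a regular grid over their bounding box, then draw the surface.
if __name__ == '__main__':
    points, tree = read_data('survey.csv')  # placeholder input file
    (min_x, min_y), (max_x, max_y) = bounds(tree)
    xs, ys, zs = grid(points, tree, (min_x, max_x), (min_y, max_y),
                      num_xsteps=100, num_ysteps=100)
    draw(xs, ys, zs)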
| mit |
azogue/esiosdata | esiosdata/__main__.py | 1 | 4429 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 18:16:24 2015
Database of electricity consumption data
@author: Eugenio Panadero
"""
import argparse
import pandas as pd
from esiosdata.classdataesios import PVPC, DatosREE
from prettyprinting import print_secc, print_info, print_cyan, print_red
__author__ = 'Eugenio Panadero'
__copyright__ = "Copyright 2015, AzogueLabs"
__credits__ = ["Eugenio Panadero"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Eugenio Panadero"
# Base columns of the PVPC data
# DEFAULT_COLUMNS_PVPC = ['GEN', 'NOC', 'VHC']
# ------------------------------------
# MAIN CLI
# ------------------------------------
def main_cli():
"""
Updates the locally stored PVPC/DEMAND database (kept as a dataframe),
creating a new one if it does not exist or if any problem arises. The recorded data are saved in HDF5
"""
def _get_parser_args():
p = argparse.ArgumentParser(description='Gestor de DB de PVPC/DEMANDA (esios.ree.es)')
p.add_argument('-d', '--dem', action='store_true', help='Selecciona BD de demanda (BD de PVPC por defecto)')
p.add_argument('-i', '--info', action='store', nargs='*',
help="Muestra información de la BD seleccionada. "
"* Puede usar intervalos temporales y nombres de columnas, "
"como '-i gen noc 2017-01-24 2017-01-26'")
p.add_argument('-fu', '-FU', '--forceupdate', action='store_true',
help="Fuerza la reconstrucción total de la BD seleccionada")
p.add_argument('-u', '-U', '--update', action='store_true',
help="Actualiza la información de la BD seleccionada hasta el instante actual")
p.add_argument('-p', '--plot', action='store_true', help="Genera plots de la información filtrada de la BD")
p.add_argument('-v', '--verbose', action='store_true', help='Muestra información extra')
arguments = p.parse_args()
return arguments, p
def _parse_date(string, columns):
try:
ts = pd.Timestamp(string)
print_cyan('{} es timestamp: {:%c} --> {}'.format(string, ts, ts.date()))
columns.remove(string)
return ts.date().isoformat()
except ValueError:
pass
args, parser = _get_parser_args()
print_secc('ESIOS PVPC/DEMANDA')
if args.dem:
db_web = DatosREE(update=args.update, force_update=args.forceupdate, verbose=args.verbose)
else:
db_web = PVPC(update=args.update, force_update=args.forceupdate, verbose=args.verbose)
data = db_web.data['data']
if args.info is not None:
if len(args.info) > 0:
cols = args.info.copy()
dates = [d for d in [_parse_date(s, cols) for s in args.info] if d]
if len(dates) == 2:
data = data.loc[dates[0]:dates[1]]
elif len(dates) == 1:
data = data.loc[dates[0]]
if len(cols) > 0:
try:
data = data[[c.upper() for c in cols]]
except KeyError as e:
print_red('NO SE PUEDE FILTRAR LA COLUMNA (Exception: {})\nLAS COLUMNAS DISPONIBLES SON:\n{}'
.format(e, data.columns))
print_info(data)
else:
print_secc('LAST 24h in DB:')
print_info(data.iloc[-24:])
print_cyan(data.columns)
if args.plot:
if args.dem:
from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora
print_red('IMPLEMENTAR PLOTS DEM')
else:
from esiosdata.pvpcplot import pvpcplot_tarifas_hora, pvpcplot_grid_hora
if len(data) < 750:
pvpcplot_grid_hora(data)
# pvpcplot_tarifas_hora(data)
else:
print_red('La selección para plot es excesiva: {} samples de {} a {}\nSe hace plot de las últimas 24h'.
format(len(data), data.index[0], data.index[-1]))
pvpcplot_grid_hora(db_web.data['data'].iloc[-24:])
pvpcplot_tarifas_hora(db_web.data['data'].iloc[-24:])
# , ax=None, show=True, ymax=None, plot_perdidas=True, fs=FIGSIZE)
# return db_web, db_web.data['data']
if __name__ == '__main__':
# datos_web, _data_dem = main_cli()
main_cli()
| mit |
Jul13/wepy | wepy/io/wiki_store.py | 1 | 2279 | # Author: Gheorghe Postelnicu
# import glob
import os
from datetime import datetime
import pandas as pd
from functools import lru_cache
from pandas import HDFStore
from topyc.util.file import latest_filename
class WikiStore(object):
"""
WikiStore is a HDFStore storage for a Quandl WIKI dataset.
The Quandl WIKI dataset can be retrieved from: https://www.quandl.com/data/WIKI-Wiki-EOD-Stock-Prices.
"""
def __init__(self, base_dir, date_index=True):
self.base_dir = base_dir
assert os.path.exists(self.base_dir)
self.date_index = date_index
self._init()
def keys(self):
return self.tickers
@lru_cache(maxsize=100)
def __getitem__(self, item):
df = self.store[item]
if self.date_index:
df.set_index('date', inplace=True)
return df
@staticmethod
def store_snapshot(base_dir, snapshot_file):
w_df = pd.read_csv(snapshot_file, parse_dates=[1])
w_df.columns = [c.replace('-', '_') for c in w_df.columns]
w_df.set_index('ticker', inplace=True)
w_df.sort_index(inplace=True)
snapshot_file = datetime.today().strftime('%Y%m%d')
with HDFStore(os.path.join(base_dir, '{}.h5'.format(snapshot_file)), 'w',
complevel=6, complib='blosc') as store:
tickers = set(w_df.index)
for ticker in tickers:
df = w_df.loc[ticker, :]
df.reset_index(inplace=True)
df = df.drop('ticker', 1)
store[ticker] = df
def _init(self):
self.store = HDFStore(latest_filename('{}/*.h5'.format(self.base_dir)))
self.tickers = [t[1:] for t in self.store.keys()]
def close(self):
self.store.close()
def tickers_column(self, tickers, col='adj_close', fun_filter=None):
if not tickers:
return None
def fetch_column(ticker):
ticker_dat = self[ticker]
df = ticker_dat[[col]]
df.columns = [ticker]
if fun_filter:
df = fun_filter(df)
return df
buf = [fetch_column(ticker) for ticker in tickers]
if len(tickers) == 1:
return buf[0]
return buf[0].join(buf[1:])
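# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): intended round trip.
# The directory, CSV path and tickers below are placeholders; it assumes a
# Quandl WIKI price export has already been downloaded.
if __name__ == '__main__':
    base_dir = '/tmp/wiki_store'            # hypothetical storage directory
    csv_snapshot = '/tmp/WIKI_PRICES.csv'   # hypothetical Quandl export
    os.makedirs(base_dir, exist_ok=True)
    WikiStore.store_snapshot(base_dir, csv_snapshot)  # one-off CSV -> HDF5
    store = WikiStore(base_dir)
    print(len(store.keys()), 'tickers available')
    closes = store.tickers_column(['AAPL', 'MSFT'], col='adj_close')
    print(closes.tail())
    store.close()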
| apache-2.0 |
herberthamaral/mestrado | CE/segunda_lista/evolution_strategy.py | 1 | 2262 | # encoding: utf-8
import time
import math
from random import random, randint
import numpy as np
def es(fitness, bounds_min, bounds_max, mu, lambda_, dimension, sigma_init=1, sigma_min=float('-inf'), tau=None, maxiter=float('inf'), max_execution_time=float('inf')):
if not tau:
tau = 1/math.sqrt(2*dimension)
population_x = np.random.uniform(bounds_min, bounds_max, size=(1, mu, dimension))[0]
population = [(xi, sigma_init, fitness(xi)) for xi in population_x]
iterations = 0
start_time = time.time()
fitness_evolution = []
while True:
for l in range(lambda_):
recombinant = recombine(population, mu, fitness)
offspring_individual_sigma = recombinant[1] * math.exp(tau*random())
mutation = offspring_individual_sigma*np.random.randn(1,dimension)[0]
offspring_individual_x = recombinant[0]+mutation
#print mutation
offspring_individual_fitness = fitness(offspring_individual_x)
population.append((offspring_individual_x, offspring_individual_sigma, offspring_individual_fitness))
population = sort_poulation(population, mu)
iterations += 1
fitness_evolution.append(population[0][2])
if population[0][1] < sigma_min or maxiter < iterations or start_time+max_execution_time < time.time():
return population[0], fitness_evolution
def recombine(population, mu, fitness):
population = sort_poulation(population, mu)
dimension = len(population[0][0])
x = []
sigma = 0
for i in range(dimension):
individual = population[randint(0, mu-1)]
x.append(individual[0][i])
sigma += individual[1]
return (x, sigma/mu, fitness(x))
def sort_poulation(population, mu):
return sorted(population, key=lambda x: x[2])[:mu]
if __name__ == '__main__':
def rastrigin(x):
n = len(x)
value = 10*n + sum([x[i]**2 - 10*math.cos(2*math.pi*x[i]) for i in range(n)])
return value
result = es(fitness=rastrigin, bounds_min=-5.12, bounds_max=5.12, mu=20, lambda_=5, dimension=5, maxiter=200, sigma_init=20)
import matplotlib.pyplot as plt
plt.plot(range(len(result[1])), result[1])
print result[0]
plt.savefig('es.png')
| apache-2.0 |
procoder317/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dviread.py | 69 | 29920 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor.
Interface::
dvi = Dvi(filename, 72)
for page in dvi: # iterate over pages
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
import numpy as np
import struct
import subprocess
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, pages) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h = _mul2012(font._scale, font._tfm.height[g])
e = _mul2012(font._scale, font._tfm.depth[g])
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
descent=maxy-maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=(maxy-maxy_pure)*d)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1))
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument "nbytes" long.
Signedness is determined by the "signed" keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
def _dispatch(self, byte):
"""
Based on the opcode "byte", read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError, "unknown command: byte %d"%byte
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of dvi file"
if i != 2:
raise ValueError, "Unknown dvi format %d"%i
if num != 25400000 or den != 7227 * 2**16:
raise ValueError, "nonstandard units in dvi file"
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
if mag != 1000:
raise ValueError, "nonstandard magnification in dvi file"
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_char in dvi file"
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced set_rule in dvi file"
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_char in dvi file"
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced put_rule in dvi file"
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError, \
"misplaced bop in dvi file (state %d)" % self.state
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
self.text = [] # list of (x,y,fontnum,glyphnum)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced eop in dvi file"
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced push in dvi file"
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced pop in dvi file"
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced right in dvi file"
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced w in dvi file"
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced x in dvi file"
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced down in dvi file"
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced y in dvi file"
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced z in dvi file"
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError, "misplaced fnt_num in dvi file"
self.f = k
def _xxx(self, special):
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:])
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError, 'tfm checksum mismatch: %s'%n
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:])
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError, "misplaced post in dvi file"
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are _not_ used for comparison.
The size is in Adobe points (converted from TeX points).
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(tfm.width.iterkeys())
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in range(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError, "Packet length mismatch in vf file"
else:
if byte in (139, 140) or byte >= 243:
raise ValueError, "Inappropriate opcode %d in vf file" % byte
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError, "unknown vf opcode %d" % byte
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError, "Misplaced packet in vf file"
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError, "pre command in middle of vf file"
if i != 202:
raise ValueError, "Unknown vf format %d" % i
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
Attributes:
checksum: for verifying against dvi file
design_size: design size of the font (in what units?)
width[i]: width of character \#i, needs to be scaled
by the factor specified in the dvi file
(this is a dict because indexing may not start from 0)
height[i], depth[i]: height and depth of character \#i
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
file = open(filename, 'rb')
try:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack('!6H', header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack('!2I', header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
finally:
file.close()
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack('!%dI' % (len(x)/4), x)
for x in (widths, heights, depths) ]
for i in range(ec-bc):
self.width[bc+i] = _fix2comp(widths[ord(char_info[4*i])])
self.height[bc+i] = _fix2comp(heights[ord(char_info[4*i+1]) >> 4])
self.depth[bc+i] = _fix2comp(depths[ord(char_info[4*i+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage: map = PsfontsMap('.../psfonts.map'); map['cmr10']
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts, while
the pdf-related files perhaps only avoid the "Base 14" pdf fonts.
But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
file = open(filename, 'rt')
try:
self._parse(file)
finally:
file.close()
def __getitem__(self, texname):
result = self._font[texname]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
while pos < len(line) and line[pos] == ' ':
pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
texname, psname = words[:2]
effects, encoding, filename = [], None, None
for word in words[2:]:
if not word.startswith('<'):
effects.append(word)
else:
word = word.lstrip('<')
if word.startswith('['):
assert encoding is None
encoding = word[1:]
elif word.endswith('.enc'):
assert encoding is None
encoding = word
else:
assert filename is None
filename = word
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
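# Editor's note (not part of the original module): the docstrings above
# describe the dvips psfonts.map syntax; two representative entries (not taken
# from any particular TeX installation) look like
#
#     cmr10 CMR10 <cmr10.pfb
#     ptmr8r Times-Roman "TeXBase1Encoding ReEncodeFont" <8r.enc <utmr8a.pfb
#
# For the second line _register stores psname='Times-Roman', one effects
# snippet ('TeXBase1Encoding ReEncodeFont'), encoding='8r.enc' and
# filename='utmr8a.pfb'.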
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
file = open(filename, 'rt')
try:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + `self.encoding`, 'debug-annoying')
finally:
file.close()
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError, "Broken name in encoding file: " + w
return result
def find_tex_file(filename, format=None):
"""
Call kpsewhich to find a file in the texmf tree.
If format is not None, it is used as the value for the --format option.
See the kpathsea documentation for more information.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result
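# Editor's note (not part of the original module): illustrative calls,
# assuming a TeX distribution with kpsewhich on the PATH.  Returned paths
# depend on the local texmf tree; the format string follows kpsewhich's
# --format names.
#
#     find_tex_file('cmr10.tfm')            # -> '/.../fonts/tfm/.../cmr10.tfm'
#     find_tex_file('8r.enc', 'enc files')  # runs kpsewhich --format='enc files' 8r.enc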
def _read_nointr(pipe, bufsize=-1):
while True:
try:
return pipe.read(bufsize)
except OSError, e:
if e.errno == errno.EINTR:
continue
else:
raise
# With multiple text objects per figure (e.g. tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print '=== new page ==='
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print 'font', f.texname, 'scaled', f._scale/pow(2.0,20)
fPrev = f
print x,y,c, 32 <= c < 128 and chr(c) or '.', w
for x,y,w,h in page.boxes:
print x,y,'BOX',w,h
| agpl-3.0 |
hrjn/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 58 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelized ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
jairideout/scikit-bio | skbio/stats/distance/_mantel.py | 8 | 19197 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip
from itertools import combinations
import six
import numpy as np
import pandas as pd
import scipy.misc
from scipy.stats import pearsonr, spearmanr
from skbio.stats.distance import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
strict=True, lookup=None):
"""Compute correlation between distance matrices using the Mantel test.
The Mantel test compares two distance matrices by computing the correlation
between the distances in the lower (or upper) triangular portions of the
symmetric distance matrices. Correlation can be computed using Pearson's
product-moment correlation coefficient or Spearman's rank correlation
coefficient.
As defined in [1]_, the Mantel test computes a test statistic :math:`r_M`
given two symmetric distance matrices :math:`D_X` and :math:`D_Y`.
:math:`r_M` is defined as
.. math::
r_M=\\frac{1}{d-1}\\sum_{i=1}^{n-1}\\sum_{j=i+1}^{n}
stand(D_X)_{ij}stand(D_Y)_{ij}
where
.. math::
d=\\frac{n(n-1)}{2}
and :math:`n` is the number of rows/columns in each of the distance
matrices. :math:`stand(D_X)` and :math:`stand(D_Y)` are distance matrices
with their upper triangles containing standardized distances. Note that
since :math:`D_X` and :math:`D_Y` are symmetric, the lower triangular
portions of the matrices could equivalently have been used instead of the
upper triangular portions (the current function behaves in this manner).
If ``method='spearman'``, the above equation operates on ranked distances
instead of the original distances.
Statistical significance is assessed via a permutation test. The rows and
columns of the first distance matrix (`x`) are randomly permuted a
number of times (controlled via `permutations`). A correlation coefficient
is computed for each permutation and the p-value is the proportion of
permuted correlation coefficients that are equal to or more extreme
than the original (unpermuted) correlation coefficient. Whether a permuted
correlation coefficient is "more extreme" than the original correlation
coefficient depends on the alternative hypothesis (controlled via
`alternative`).
Parameters
----------
x, y : DistanceMatrix or array_like
Input distance matrices to compare. If `x` and `y` are both
``DistanceMatrix`` instances, they will be reordered based on matching
IDs (see `strict` and `lookup` below for handling matching/mismatching
IDs); thus they are not required to be in the same ID order. If `x` and
`y` are ``array_like``, no reordering is applied and both matrices must
have the same shape. In either case, `x` and `y` must be at least 3x3
in size *after* reordering and matching of IDs.
method : {'pearson', 'spearman'}
Method used to compute the correlation between distance matrices.
permutations : int, optional
Number of times to randomly permute `x` when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
alternative : {'two-sided', 'greater', 'less'}
Alternative hypothesis to use when calculating statistical
significance. The default ``'two-sided'`` alternative hypothesis
calculates the proportion of permuted correlation coefficients whose
magnitude (i.e. after taking the absolute value) is greater than or
equal to the absolute value of the original correlation coefficient.
``'greater'`` calculates the proportion of permuted coefficients that
are greater than or equal to the original coefficient. ``'less'``
calculates the proportion of permuted coefficients that are less than
or equal to the original coefficient.
strict : bool, optional
If ``True``, raises a ``ValueError`` if IDs are found that do not exist
in both distance matrices. If ``False``, any nonmatching IDs are
discarded before running the test. See `n` (in Returns section below)
for the number of matching IDs that were used in the test. This
parameter is ignored if `x` and `y` are ``array_like``.
lookup : dict, optional
Maps each ID in the distance matrices to a new ID. Used to match up IDs
across distance matrices prior to running the Mantel test. If the IDs
already match between the distance matrices, this parameter is not
necessary. This parameter is disallowed if `x` and `y` are
``array_like``.
Returns
-------
corr_coeff : float
Correlation coefficient of the test (depends on `method`).
p_value : float
p-value of the test.
n : int
Number of rows/columns in each of the distance matrices, after any
reordering/matching of IDs. If ``strict=False``, nonmatching IDs may
have been discarded from one or both of the distance matrices prior to
running the Mantel test, so this value may be important as it indicates
the *actual* size of the matrices that were compared.
Raises
------
ValueError
If `x` and `y` are not at least 3x3 in size after reordering/matching
of IDs, or an invalid `method`, number of `permutations`, or
`alternative` are provided.
TypeError
If `x` and `y` are not both ``DistanceMatrix`` instances or
``array_like``.
See Also
--------
DistanceMatrix
scipy.stats.pearsonr
scipy.stats.spearmanr
pwmantel
Notes
-----
The Mantel test was first described in [2]_. The general algorithm and
interface are similar to ``vegan::mantel``, available in R's vegan
package [3]_.
``np.nan`` will be returned for the p-value if `permutations` is zero or if
the correlation coefficient is ``np.nan``. The correlation coefficient will
be ``np.nan`` if one or both of the inputs does not have any variation
(i.e. the distances are all constant) and ``method='spearman'``.
References
----------
.. [1] Legendre, P. and Legendre, L. (2012) Numerical Ecology. 3rd English
Edition. Elsevier.
.. [2] Mantel, N. (1967). "The detection of disease clustering and a
generalized regression approach". Cancer Research 27 (2): 209-220. PMID
6018555.
.. [3] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
Import the functionality we'll use in the following examples:
>>> from skbio import DistanceMatrix
>>> from skbio.stats.distance import mantel
Define two 3x3 distance matrices:
>>> x = DistanceMatrix([[0, 1, 2],
... [1, 0, 3],
... [2, 3, 0]])
>>> y = DistanceMatrix([[0, 2, 7],
... [2, 0, 6],
... [7, 6, 0]])
Compute the Pearson correlation between them and assess significance using
a two-sided test with 999 permutations:
>>> coeff, p_value, n = mantel(x, y)
>>> print(round(coeff, 4))
0.7559
Thus, we see a moderate-to-strong positive correlation (:math:`r_M=0.7559`)
between the two matrices.
In the previous example, the distance matrices (``x`` and ``y``) have the
same IDs, in the same order:
>>> x.ids
('0', '1', '2')
>>> y.ids
('0', '1', '2')
If necessary, ``mantel`` will reorder the distance matrices prior to
running the test. The function also supports a ``lookup`` dictionary that
maps distance matrix IDs to new IDs, providing a way to match IDs between
distance matrices prior to running the Mantel test.
For example, let's reassign the distance matrices' IDs so that there are no
matching IDs between them:
>>> x.ids = ('a', 'b', 'c')
>>> y.ids = ('d', 'e', 'f')
If we rerun ``mantel``, we get the following error notifying us that there
are nonmatching IDs (this is the default behavior with ``strict=True``):
>>> mantel(x, y)
Traceback (most recent call last):
...
ValueError: IDs exist that are not in both distance matrices.
If we pass ``strict=False`` to ignore/discard nonmatching IDs, we see that
no matches exist between `x` and `y`, so the Mantel test still cannot be
run:
>>> mantel(x, y, strict=False)
Traceback (most recent call last):
...
ValueError: No matching IDs exist between the distance matrices.
To work around this, we can define a ``lookup`` dictionary to specify how
the IDs should be matched between distance matrices:
>>> lookup = {'a': 'A', 'b': 'B', 'c': 'C',
... 'd': 'A', 'e': 'B', 'f': 'C'}
``lookup`` maps each ID to ``'A'``, ``'B'``, or ``'C'``. If we rerun
``mantel`` with ``lookup``, we get the same results as the original
example where all distance matrix IDs matched:
>>> coeff, p_value, n = mantel(x, y, lookup=lookup)
>>> print(round(coeff, 4))
0.7559
``mantel`` also accepts input that is ``array_like``. For example, if we
redefine `x` and `y` as nested Python lists instead of ``DistanceMatrix``
instances, we obtain the same result:
>>> x = [[0, 1, 2],
... [1, 0, 3],
... [2, 3, 0]]
>>> y = [[0, 2, 7],
... [2, 0, 6],
... [7, 6, 0]]
>>> coeff, p_value, n = mantel(x, y)
>>> print(round(coeff, 4))
0.7559
    It is important to note that reordering/matching of IDs (and hence the
    ``strict`` and ``lookup`` parameters) does not apply when input is
``array_like`` because there is no notion of IDs.
"""
if method == 'pearson':
corr_func = pearsonr
elif method == 'spearman':
corr_func = spearmanr
else:
raise ValueError("Invalid correlation method '%s'." % method)
if permutations < 0:
raise ValueError("Number of permutations must be greater than or "
"equal to zero.")
if alternative not in ('two-sided', 'greater', 'less'):
raise ValueError("Invalid alternative hypothesis '%s'." % alternative)
x, y = _order_dms(x, y, strict=strict, lookup=lookup)
n = x.shape[0]
if n < 3:
raise ValueError("Distance matrices must have at least 3 matching IDs "
"between them (i.e., minimum 3x3 in size).")
x_flat = x.condensed_form()
y_flat = y.condensed_form()
orig_stat = corr_func(x_flat, y_flat)[0]
if permutations == 0 or np.isnan(orig_stat):
p_value = np.nan
else:
perm_gen = (corr_func(x.permute(condensed=True), y_flat)[0]
for _ in range(permutations))
permuted_stats = np.fromiter(perm_gen, np.float, count=permutations)
if alternative == 'two-sided':
count_better = (np.absolute(permuted_stats) >=
np.absolute(orig_stat)).sum()
elif alternative == 'greater':
count_better = (permuted_stats >= orig_stat).sum()
else:
count_better = (permuted_stats <= orig_stat).sum()
p_value = (count_better + 1) / (permutations + 1)
return orig_stat, p_value, n
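# --- Editor's illustration (not part of the scikit-bio API) ------------------
# A minimal, self-contained sketch of the permutation test implemented above,
# operating directly on plain symmetric ndarrays (no ID handling, two-sided
# alternative only). The function name and simplifications are the editor's.
def _mantel_sketch(dx, dy, permutations=999, seed=0):
    dx = np.asarray(dx, dtype=float)
    dy = np.asarray(dy, dtype=float)
    iu = np.triu_indices_from(dx, k=1)        # upper-triangle distances
    orig = pearsonr(dx[iu], dy[iu])[0]
    rng = np.random.RandomState(seed)
    hits = 0
    for _ in range(permutations):
        p = rng.permutation(dx.shape[0])      # permute rows/columns of dx
        if abs(pearsonr(dx[p][:, p][iu], dy[iu])[0]) >= abs(orig):
            hits += 1
    return orig, (hits + 1) / (permutations + 1)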
@experimental(as_of="0.4.0")
def pwmantel(dms, labels=None, method='pearson', permutations=999,
alternative='two-sided', strict=True, lookup=None):
"""Run Mantel tests for every pair of given distance matrices.
Runs a Mantel test for each pair of distance matrices and collates the
results in a ``DataFrame``. Distance matrices do not need to be in the same
ID order if they are ``DistanceMatrix`` instances. Distance matrices will
be re-ordered prior to running each pairwise test, and if ``strict=False``,
IDs that don't match between a pair of distance matrices will be dropped
prior to running the test (otherwise a ``ValueError`` will be raised if
there are nonmatching IDs between any pair of distance matrices).
Parameters
----------
dms : iterable of DistanceMatrix objects, array_like objects, or filepaths
to distance matrices. If they are ``array_like``, no reordering or
matching of IDs will be performed.
labels : iterable of str or int, optional
Labels for each distance matrix in `dms`. These are used in the results
``DataFrame`` to identify the pair of distance matrices used in a
pairwise Mantel test. If ``None``, defaults to monotonically-increasing
integers starting at zero.
method : {'pearson', 'spearman'}
Correlation method. See ``mantel`` function for more details.
permutations : int, optional
Number of permutations. See ``mantel`` function for more details.
alternative : {'two-sided', 'greater', 'less'}
Alternative hypothesis. See ``mantel`` function for more details.
strict : bool, optional
Handling of nonmatching IDs. See ``mantel`` function for more details.
lookup : dict, optional
Map existing IDs to new IDs. See ``mantel`` function for more details.
Returns
-------
pandas.DataFrame
``DataFrame`` containing the results of each pairwise test (one per
row). Includes the number of objects considered in each test as column
``n`` (after applying `lookup` and filtering nonmatching IDs if
``strict=False``). Column ``p-value`` will display p-values as ``NaN``
if p-values could not be computed (they are stored as ``np.nan`` within
the ``DataFrame``; see ``mantel`` for more details).
See Also
--------
mantel
DistanceMatrix.read
Notes
    -----
    Passing a list of filepaths can be useful as it reduces memory consumption:
    only two distance matrices are loaded at a time, as opposed to loading all
    distance matrices into memory at once.
Examples
--------
Import the functionality we'll use in the following examples:
>>> from skbio import DistanceMatrix
>>> from skbio.stats.distance import pwmantel
Define three 3x3 distance matrices:
>>> x = DistanceMatrix([[0, 1, 2],
... [1, 0, 3],
... [2, 3, 0]])
>>> y = DistanceMatrix([[0, 2, 7],
... [2, 0, 6],
... [7, 6, 0]])
>>> z = DistanceMatrix([[0, 5, 6],
... [5, 0, 1],
... [6, 1, 0]])
Run Mantel tests for each pair of distance matrices (there are 3 possible
pairs):
>>> pwmantel((x, y, z), labels=('x', 'y', 'z'),
... permutations=0) # doctest: +NORMALIZE_WHITESPACE
statistic p-value n method permutations alternative
dm1 dm2
x y 0.755929 NaN 3 pearson 0 two-sided
z -0.755929 NaN 3 pearson 0 two-sided
y z -0.142857 NaN 3 pearson 0 two-sided
Note that we passed ``permutations=0`` to suppress significance tests; the
p-values in the output are labelled ``NaN``.
"""
num_dms = len(dms)
if num_dms < 2:
raise ValueError("Must provide at least two distance matrices.")
if labels is None:
labels = range(num_dms)
else:
if num_dms != len(labels):
raise ValueError("Number of labels must match the number of "
"distance matrices.")
if len(set(labels)) != len(labels):
raise ValueError("Labels must be unique.")
num_combs = scipy.misc.comb(num_dms, 2, exact=True)
results_dtype = [('dm1', object), ('dm2', object), ('statistic', float),
('p-value', float), ('n', int), ('method', object),
('permutations', int), ('alternative', object)]
results = np.empty(num_combs, dtype=results_dtype)
for i, pair in enumerate(combinations(zip(labels, dms), 2)):
(xlabel, x), (ylabel, y) = pair
if isinstance(x, six.string_types):
x = DistanceMatrix.read(x)
if isinstance(y, six.string_types):
y = DistanceMatrix.read(y)
stat, p_val, n = mantel(x, y, method=method, permutations=permutations,
alternative=alternative, strict=strict,
lookup=lookup)
results[i] = (xlabel, ylabel, stat, p_val, n, method, permutations,
alternative)
return pd.DataFrame.from_records(results, index=('dm1', 'dm2'))
def _order_dms(x, y, strict=True, lookup=None):
"""Intersect distance matrices and put them in the same order."""
x_is_dm = isinstance(x, DistanceMatrix)
y_is_dm = isinstance(y, DistanceMatrix)
if (x_is_dm and not y_is_dm) or (y_is_dm and not x_is_dm):
raise TypeError(
"Mixing DistanceMatrix and array_like input types is not "
"supported. Both x and y must either be DistanceMatrix instances "
"or array_like, but not mixed.")
elif x_is_dm and y_is_dm:
if lookup is not None:
x = _remap_ids(x, lookup, 'x', 'first')
y = _remap_ids(y, lookup, 'y', 'second')
id_order = [id_ for id_ in x.ids if id_ in y]
num_matches = len(id_order)
if (strict and ((num_matches != len(x.ids)) or
(num_matches != len(y.ids)))):
raise ValueError("IDs exist that are not in both distance "
"matrices.")
if num_matches < 1:
raise ValueError("No matching IDs exist between the distance "
"matrices.")
return x.filter(id_order), y.filter(id_order)
else:
# Both x and y aren't DistanceMatrix instances.
if lookup is not None:
raise ValueError("ID lookup can only be provided if inputs are "
"DistanceMatrix instances.")
x = DistanceMatrix(x)
y = DistanceMatrix(y)
if x.shape != y.shape:
raise ValueError("Distance matrices must have the same shape.")
return x, y
def _remap_ids(dm, lookup, label, order):
"Return a copy of `dm` with its IDs remapped based on `lookup`."""
try:
remapped_ids = [lookup[id_] for id_ in dm.ids]
except KeyError as e:
raise KeyError("All IDs in the %s distance matrix (%s) must be in "
"the lookup. Missing ID: %s" % (order, label, str(e)))
# Create a copy as we'll be modifying the IDs in place.
dm_copy = dm.copy()
dm_copy.ids = remapped_ids
return dm_copy
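# --- Editor's illustration (not part of scikit-bio) --------------------------
# As noted in the ``pwmantel`` docstring, filepaths may be passed instead of
# in-memory matrices so that only two distance matrices are loaded at a time.
# The paths and labels below are placeholders; the helper is never called here.
def _pwmantel_from_files_sketch():
    paths = ['dm1.txt', 'dm2.txt', 'dm3.txt']   # hypothetical file locations
    return pwmantel(paths, labels=('a', 'b', 'c'), method='spearman',
                    permutations=999, strict=False)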
| bsd-3-clause |
gotomypc/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
    C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]  # integer division keeps the shape an int
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each classifier
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
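# --- Editor's addendum (illustrative only, not part of the original example) -
# "Scaling C" in the second subplot simply means multiplying every candidate C
# by the number of samples seen during training; e.g. for a 70% train fraction:
demo_cs = np.logspace(-2.3, -1.3, 10)
demo_n_train = int(n_samples * 0.7)
print("unscaled C grid:", demo_cs)
print("scaled C grid (1/n_samples compensation):", demo_cs * demo_n_train)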
| bsd-3-clause |
mfjb/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
Chandlercjy/OnePy | OnePy/builtin_module/optimizer.py | 1 | 3297 | import multiprocessing
import os
import time
from collections import defaultdict
from itertools import count, product
from typing import Iterable, Tuple
import pandas as pd
import OnePy as op
from OnePy.sys_module.metabase_env import OnePyEnvBase
from OnePy.utils.awesome_func import run_multiprocessing
class Optimizer(OnePyEnvBase):
def __init__(self):
self.workers = os.cpu_count()
self.initial_params = defaultdict(dict)
self.mid_params = defaultdict(list)
self.final_params = None
self.strategy_names = []
self.total_iter_times = None
def refresh(self):
self.initial_params = defaultdict(dict)
self.mid_params = defaultdict(list)
self.final_params = None
self.strategy_names = []
self.total_iter_times = None
def _tuple_to_dict(self, tuple_list: Tuple[dict]):
value = {}
for i in tuple_list:
value.update(i)
return value
def _optimize_func(self, params: dict, cache: list, index: int):
t1 = time.time()
        op.CleanerBase.counter = count(1)  # reset the cleaner cache so it is not initialized repeatedly
go = op.OnePiece()
for strategy_name, strategy in go.env.strategies.items():
strategy.set_params(params[strategy_name])
go.sunny(False)
summary = go.output.analysis.general_summary()
summary.update(params)
cache.append(summary)
t2 = time.time()
self._compute_running_time(t1, t2, len(cache))
def _compute_running_time(self, start: float, end: float, finished_times: int):
diff = end - start
left = diff*(self.total_iter_times-finished_times)/60/self.workers
        print(f'Current run: {finished_times}, remaining: {left:.2f} mins')
def _combine_all_params(self):
for name in self.strategy_names:
strategy_params = product(*self.initial_params[name].values())
for i in strategy_params:
self.mid_params[name].append({name: self._tuple_to_dict(i)})
result = product(*self.mid_params.values())
result = [self._tuple_to_dict(i) for i in result]
        # Keep only unique parameter combinations (popping from the end, as before).
        unique = []
        for _ in range(len(result)):
            new = result.pop()
            if new not in unique:
                unique.append(new)
        self.final_params = unique
def set_params(self, strategy_name: str, param: str, param_range: Iterable):
if strategy_name not in self.strategy_names:
self.strategy_names.append(strategy_name)
self.initial_params[strategy_name][param] = [
{param: i} for i in param_range]
def run(self, filename: str = 'optimize_result.pkl'):
self._combine_all_params()
self.total_iter_times = len(self.final_params)
        print(f'Running {self.total_iter_times} optimization combinations in total')
cache_list: list = multiprocessing.Manager().list()
params = [(param, cache_list, index)
for index, param in enumerate(self.final_params)]
run_multiprocessing(self._optimize_func, params, self.workers)
        print('Parameter optimization finished!')
if filename:
pd.to_pickle([i for i in cache_list], filename)
return [i for i in cache_list]
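# --- Editor's illustration (not part of OnePy) -------------------------------
# Hypothetical usage sketch: sweep two parameters of a strategy registered
# under the name 'MyStrategy'. The strategy and parameter names are
# assumptions; the helper is defined only for illustration and never called.
def _optimizer_usage_sketch():
    optimizer = Optimizer()
    optimizer.set_params('MyStrategy', 'fast_period', range(5, 20, 5))
    optimizer.set_params('MyStrategy', 'slow_period', range(20, 60, 10))
    # Runs every combination of the two grids and pickles the summaries.
    return optimizer.run('optimize_result.pkl')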
| mit |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
                    y_pred = np.vstack([p for p in y_pred]).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
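# --- Editor's illustration (not part of the original module) -----------------
# A scorer retrieved by name is just a callable ``scorer(estimator, X, y)``
# whose result is always "greater is better" (loss-based scorers are
# sign-flipped by ``_sign``). A minimal self-contained check, kept inside a
# function so that importing this module does not fit anything:
def _scorer_demo():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    X, y = make_classification(n_samples=60, n_features=5, random_state=0)
    clf = LogisticRegression().fit(X, y)
    acc = get_scorer('accuracy')(clf, X, y)       # plain accuracy
    neg_ll = get_scorer('log_loss')(clf, X, y)    # negated log loss (<= 0)
    return acc, neg_ll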
| mit |
ndingwall/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 17 | 3420 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of the data points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_swiss_roll
# #############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise=noise)
# Make it thinner
X[:, 1] *= .5
# #############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.scatter(X[label == l, 0], X[label == l, 1], X[label == l, 2],
color=plt.cm.jet(float(l) / np.max(label + 1)),
s=20, edgecolor='k')
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
# #############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# #############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.scatter(X[label == l, 0], X[label == l, 1], X[label == l, 2],
color=plt.cm.jet(float(l) / np.max(label + 1)),
s=20, edgecolor='k')
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
yanchen036/tensorflow | tensorflow/python/estimator/canned/dnn_linear_combined_test.py | 11 | 33691 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn_linear_combined.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import dnn_linear_combined
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import linear_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer as optimizer_lib
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn)
def _dnn_only_model_fn(self,
features,
labels,
mode,
head,
hidden_units,
feature_columns,
optimizer='Adagrad',
activation_fn=nn.relu,
dropout=None,
input_layer_partitioner=None,
config=None):
return dnn_linear_combined._dnn_linear_combined_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
linear_feature_columns=[],
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
dnn_activation_fn=activation_fn,
dnn_dropout=dropout,
input_layer_partitioner=input_layer_partitioner,
config=config)
# A function to mimic linear-regressor init reuse same tests.
def _linear_regressor_fn(feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyRegressorPartitionerTest(
linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorEvaluationTest(
linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorPredictTest(
linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorIntegrationTest(
linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn)
class LinearOnlyRegressorTrainingTest(
linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn)
def _linear_classifier_fn(feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Ftrl',
config=None,
partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=feature_columns,
linear_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=partitioner,
config=config)
class LinearOnlyClassifierTrainingTest(
linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierClassesEvaluationTest(
linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierPredictTest(
linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class LinearOnlyClassifierIntegrationTest(
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn)
class DNNLinearCombinedRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
# A function that mimics the dnn-classifier constructor so the shared DNN tests can be reused.
def _dnn_classifier_fn(hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedClassifier(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
n_classes=n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNOnlyClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
# A function that mimics the dnn-regressor constructor so the shared DNN tests can be reused.
def _dnn_regressor_fn(hidden_units,
feature_columns,
model_dir=None,
label_dimension=1,
weight_column=None,
optimizer='Adagrad',
config=None,
input_layer_partitioner=None):
return dnn_linear_combined.DNNLinearCombinedRegressor(
model_dir=model_dir,
dnn_hidden_units=hidden_units,
dnn_feature_columns=feature_columns,
dnn_optimizer=optimizer,
label_dimension=label_dimension,
weight_column=weight_column,
input_layer_partitioner=input_layer_partitioner,
config=config)
class DNNOnlyRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNOnlyRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
class DNNLinearCombinedClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
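    # Round the continuous input values to the nearest integers so they can be
    # used as class labels.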
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
linear_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
dnn_feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
feature_columns = linear_feature_columns + dnn_feature_columns
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_feature_columns,
dnn_hidden_units=(2, 2),
dnn_feature_columns=dnn_feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1)))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 2
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes-1., batch_size * input_dimension,
dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = linear_testing_utils.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
class DNNLinearCombinedTests(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, real_optimizer, var_name_prefix):
"""Verifies global_step is None and var_names start with given prefix."""
def _minimize(loss, global_step=None, var_list=None):
self.assertIsNone(global_step)
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
var_names = [var.name for var in trainable_vars]
self.assertTrue(
all([name.startswith(var_name_prefix) for name in var_names]))
      # The variable below lets the test verify that this minimize op was run
      # during training.
with ops.name_scope(''):
var = variables_lib.Variable(0., name=(var_name_prefix + '_called'))
with ops.control_dependencies([var.assign(100.)]):
return real_optimizer.minimize(loss, global_step, var_list)
optimizer_mock = test.mock.NonCallableMagicMock(
spec=optimizer_lib.Optimizer, wraps=real_optimizer)
optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize)
return optimizer_mock
def test_train_op_calls_both_dnn_and_linear(self):
opt = gradient_descent.GradientDescentOptimizer(1.)
x_column = feature_column.numeric_column('x')
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[0.], [1.]])},
y=np.array([[0.], [1.]]),
batch_size=1,
shuffle=False)
est = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[x_column],
# verifies linear_optimizer is used only for linear part.
linear_optimizer=self._mock_optimizer(opt, 'linear'),
dnn_hidden_units=(2, 2),
dnn_feature_columns=[x_column],
        # verifies dnn_optimizer is used only for the dnn part.
dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
model_dir=self._model_dir)
est.train(input_fn, steps=1)
# verifies train_op fires linear minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'linear_called'))
# verifies train_op fires dnn minimize op
self.assertEqual(100.,
checkpoint_utils.load_variable(
self._model_dir, 'dnn_called'))
def test_dnn_and_linear_logits_are_added(self):
with ops.Graph().as_default():
variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
variables_lib.Variable([6.0], name='dnn/logits/bias')
variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
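      # Save these hand-picked weights to a checkpoint; the estimator below
      # loads them, so the combined prediction can be computed by hand (see
      # the logits arithmetic in the comments further down).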
linear_testing_utils.save_variables_to_ckpt(self._model_dir)
x_column = feature_column.numeric_column('x')
est = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[x_column],
dnn_hidden_units=[1],
dnn_feature_columns=[x_column],
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
# linear logits = 10*1 + 2 = 12
# dnn logits = (10*3 + 4)*5 + 6 = 176
# logits = dnn + linear = 176 + 12 = 188
self.assertAllClose(
{
prediction_keys.PredictionKeys.PREDICTIONS: [188.],
},
next(est.predict(input_fn=input_fn)))
class DNNLinearCombinedWarmStartingTest(test.TestCase):
def setUp(self):
# Create a directory to save our old checkpoint and vocabularies to.
self._ckpt_and_vocab_dir = tempfile.mkdtemp()
# Make a dummy input_fn.
def _input_fn():
features = {
'age': [[23.], [31.]],
'city': [['Palo Alto'], ['Mountain View']],
}
return features, [0, 1]
self._input_fn = _input_fn
def tearDown(self):
# Clean up checkpoint / vocab dir.
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._ckpt_and_vocab_dir)
def test_classifier_basic_warm_starting(self):
"""Tests correctness of DNNLinearCombinedClassifier default warm-start."""
age = feature_column.numeric_column('age')
city = feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
# Create a DNNLinearCombinedClassifier and train to save a checkpoint.
dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
linear_optimizer='SGD',
dnn_optimizer='SGD')
dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second DNNLinearCombinedClassifier, warm-started from the first.
# Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't
# have accumulator values that change).
warm_started_dnn_lc_classifier = (
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
n_classes=4,
linear_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
dnn_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=dnn_lc_classifier.model_dir))
warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_dnn_lc_classifier.get_variable_names():
self.assertAllClose(
dnn_lc_classifier.get_variable_value(variable_name),
warm_started_dnn_lc_classifier.get_variable_value(variable_name))
def test_regressor_basic_warm_starting(self):
"""Tests correctness of DNNLinearCombinedRegressor default warm-start."""
age = feature_column.numeric_column('age')
city = feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
# Create a DNNLinearCombinedRegressor and train to save a checkpoint.
dnn_lc_regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
model_dir=self._ckpt_and_vocab_dir,
linear_optimizer='SGD',
dnn_optimizer='SGD')
dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1)
# Create a second DNNLinearCombinedRegressor, warm-started from the first.
# Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't
# have accumulator values that change).
warm_started_dnn_lc_regressor = (
dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
linear_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
dnn_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
warm_start_from=dnn_lc_regressor.model_dir))
warm_started_dnn_lc_regressor.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_dnn_lc_regressor.get_variable_names():
self.assertAllClose(
dnn_lc_regressor.get_variable_value(variable_name),
warm_started_dnn_lc_regressor.get_variable_value(variable_name))
def test_warm_starting_selective_variables(self):
"""Tests selecting variables to warm-start."""
age = feature_column.numeric_column('age')
city = feature_column.embedding_column(
feature_column.categorical_column_with_vocabulary_list(
'city', vocabulary_list=['Mountain View', 'Palo Alto']),
dimension=5)
# Create a DNNLinearCombinedClassifier and train to save a checkpoint.
dnn_lc_classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
model_dir=self._ckpt_and_vocab_dir,
n_classes=4,
linear_optimizer='SGD',
dnn_optimizer='SGD')
dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
# Create a second DNNLinearCombinedClassifier, warm-started from the first.
# Use a learning_rate = 0.0 optimizer to check values (use SGD so we don't
# have accumulator values that change).
warm_started_dnn_lc_classifier = (
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age],
dnn_feature_columns=[city],
dnn_hidden_units=[256, 128],
n_classes=4,
linear_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
dnn_optimizer=gradient_descent.GradientDescentOptimizer(
learning_rate=0.0),
# The provided regular expression will only warm-start the deep
# portion of the model.
warm_start_from=estimator.WarmStartSettings(
ckpt_to_initialize_from=dnn_lc_classifier.model_dir,
vars_to_warm_start='.*(dnn).*')))
warm_started_dnn_lc_classifier.train(input_fn=self._input_fn, max_steps=1)
for variable_name in warm_started_dnn_lc_classifier.get_variable_names():
if 'dnn' in variable_name:
self.assertAllClose(
dnn_lc_classifier.get_variable_value(variable_name),
warm_started_dnn_lc_classifier.get_variable_value(variable_name))
elif 'linear' in variable_name:
linear_values = warm_started_dnn_lc_classifier.get_variable_value(
variable_name)
# Since they're not warm-started, the linear weights will be
# zero-initialized.
self.assertAllClose(np.zeros_like(linear_values), linear_values)
if __name__ == '__main__':
test.main()
| apache-2.0 |
rkmaddox/mne-python | mne/externals/tqdm/_tqdm/__init__.py | 14 | 1663 | from .std import tqdm, trange
from .gui import tqdm as tqdm_gui # TODO: remove in v5.0.0
from .gui import trange as tgrange # TODO: remove in v5.0.0
from ._tqdm_pandas import tqdm_pandas
from .cli import main # TODO: remove in v5.0.0
from ._monitor import TMonitor, TqdmSynchronisationWarning
from ._version import __version__ # NOQA
from .std import TqdmTypeError, TqdmKeyError, TqdmWarning, \
TqdmDeprecationWarning, TqdmExperimentalWarning, \
TqdmMonitorWarning
__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
'TqdmTypeError', 'TqdmKeyError',
'TqdmWarning', 'TqdmDeprecationWarning',
'TqdmExperimentalWarning',
'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
'__version__']
def tqdm_notebook(*args, **kwargs): # pragma: no cover
"""See tqdm.notebook.tqdm for full documentation"""
from .notebook import tqdm as _tqdm_notebook
from warnings import warn
warn("This function will be removed in tqdm==5.0.0\n"
"Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`",
TqdmDeprecationWarning, stacklevel=2)
return _tqdm_notebook(*args, **kwargs)
def tnrange(*args, **kwargs): # pragma: no cover
"""
A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`.
On Python3+, `range` is used instead of `xrange`.
"""
from .notebook import trange as _tnrange
from warnings import warn
warn("Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`",
TqdmDeprecationWarning, stacklevel=2)
return _tnrange(*args, **kwargs)
| bsd-3-clause |
ankurankan/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
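    # e.g. 'datasets-UCI iris' becomes 'datasets-uci-iris'; parentheses and
    # dots are stripped by the substitution below.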
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
    to be transposed to match the `sklearn` standard
    Keyword arguments make it possible to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
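            # The download or copy failed: delete the partial file before
            # re-raising so a later call can retry cleanly.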
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
    # if target or data names are indices, transform them into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to set up the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
HBPNeurorobotics/nest-simulator | pynest/examples/intrinsic_currents_subthreshold.py | 4 | 7182 | # -*- coding: utf-8 -*-
#
# intrinsic_currents_subthreshold.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Intrinsic currents subthreshold
-------------------------------
This example illustrates how to record from a model with multiple
intrinsic currents and visualize the results. This is demonstrated
using the `ht_neuron`, which has four intrinsic currents: I_NaP,
I_KNa, I_T, and I_h. It is a slightly simplified implementation of
the neuron model proposed in Hill and Tononi (2005) **Modeling Sleep
and Wakefulness in the Thalamocortical System** *J Neurophysiol* 93:1671
http://dx.doi.org/10.1152/jn.00915.2004 .
The neuron is driven by DC current, which is alternated
between depolarizing and hyperpolarizing. Hyperpolarization
intervals become increasingly longer.
See also: intrinsic_currents_spiking.py
'''
'''
We import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import numpy as np
import matplotlib.pyplot as plt
'''
Additionally, we set the verbosity using `set_verbosity` to
suppress info messages. We also reset the kernel to be sure to start
with a clean NEST.
'''
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
'''
We define simulation parameters:
- The length of depolarization intervals
- The length of hyperpolarization intervals
- The amplitude for de- and hyperpolarizing currents
- The end of the time window to plot
'''
n_blocks = 5
t_block = 20.
t_dep = [t_block] * n_blocks
t_hyp = [t_block * 2 ** n for n in range(n_blocks)]
I_dep = 10.
I_hyp = -5.
t_end = 500.
'''
We create one neuron instance and the DC current generator
and store the returned handles.
'''
nrn = nest.Create('ht_neuron')
dc = nest.Create('dc_generator')
'''
We create a multimeter to record
- membrane potential `V_m`
- threshold value `Theta`
- intrinsic currents `I_NaP`, `I_KNa`, `I_T`, `I_h`
by passing these names in the `record_from` list.
To find out which quantities can be recorded from a given neuron,
run::
nest.GetDefaults('ht_neuron')['recordables']
The result will contain an entry like::
<SLILiteral: V_m>
for each recordable quantity. You need to pass the value of the `SLILiteral`,
in this case `V_m`, in the `record_from` list.
We want to record values with 0.1 ms resolution, so we set the
recording interval as well; the default recording resolution is 1 ms.
'''
# create multimeter and configure it to record all information
# we want at 0.1ms resolution
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'Theta',
'I_NaP', 'I_KNa', 'I_T', 'I_h']}
)
'''
We connect the DC generator and the multimeter to the neuron.
Note that the multimeter, just like the voltmeter, is connected
to the neuron, not the neuron to the multimeter.
'''
nest.Connect(dc, nrn)
nest.Connect(mm, nrn)
'''
We are ready to simulate. We alternate between driving the neuron
with depolarizing and hyperpolarizing currents. Before each simulation
interval, we set the amplitude of the DC generator to the correct value.
'''
for t_sim_dep, t_sim_hyp in zip(t_dep, t_hyp):
nest.SetStatus(dc, {'amplitude': I_dep})
nest.Simulate(t_sim_dep)
nest.SetStatus(dc, {'amplitude': I_hyp})
nest.Simulate(t_sim_hyp)
'''
We now fetch the data recorded by the multimeter. The data are
returned as a dictionary with entry ``'times'`` containing timestamps
for all recorded data, plus one entry per recorded quantity.
All data is contained in the ``'events'`` entry of the status dictionary
returned by the multimeter. Because all NEST functions return arrays,
we need to pick out element ``0`` from the result of `GetStatus`.
'''
data = nest.GetStatus(mm)[0]['events']
t = data['times']
'''
The next step is to plot the results. We create a new figure, add a
single subplot and plot at first membrane potential and threshold.
'''
fig = plt.figure()
Vax = fig.add_subplot(111)
Vax.plot(t, data['V_m'], 'b-', lw=2, label=r'$V_m$')
Vax.plot(t, data['Theta'], 'g-', lw=2, label=r'$\Theta$')
Vax.set_ylim(-80., 0.)
Vax.set_ylabel('Voltage [mV]')
Vax.set_xlabel('Time [ms]')
'''
To plot the input current, we need to create an input
current trace. We construct it from the durations of the de- and
hyperpolarizing inputs and add the delay in the connection between
DC generator and neuron:
1. We find the delay by checking the status of the dc->nrn connection.
2. We find the resolution of the simulation from the kernel status.
3. Each current interval begins one time step after the previous interval,
   is delayed by the delay and effective for the given duration.
4. We build the time axis incrementally. We only add the delay when adding
   the first time point after t=0. All subsequent points are then automatically
   shifted by the delay.
'''
delay = nest.GetStatus(nest.GetConnections(dc, nrn))[0]['delay']
dt = nest.GetKernelStatus('resolution')
t_dc, I_dc = [0], [0]
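# Build the current trace piecewise; the connection delay is added only once,
# at the first point after t=0, so all later points are shifted automatically.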
for td, th in zip(t_dep, t_hyp):
t_prev = t_dc[-1]
t_start_dep = t_prev + dt if t_prev > 0 else t_prev + dt + delay
t_end_dep = t_start_dep + td
t_start_hyp = t_end_dep + dt
t_end_hyp = t_start_hyp + th
t_dc.extend([t_start_dep, t_end_dep, t_start_hyp, t_end_hyp])
I_dc.extend([I_dep, I_dep, I_hyp, I_hyp])
'''
The following function turns a name such as I_NaP into proper TeX code
$I_{\mathrm{NaP}}$ for a pretty label.
'''
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
'''
Next, we add a right vertical axis and plot the currents with respect
to that axis.
'''
Iax = Vax.twinx()
Iax.plot(t_dc, I_dc, 'k-', lw=2, label=texify_name('I_DC'))
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
Iax.set_xlim(0, t_end)
Iax.set_ylim(-10., 15.)
Iax.set_ylabel('Current [pA]')
Iax.set_title('ht_neuron driven by DC current')
'''
We need to make a little extra effort to combine lines from the two axes
into one legend.
'''
lines_V, labels_V = Vax.get_legend_handles_labels()
lines_I, labels_I = Iax.get_legend_handles_labels()
try:
Iax.legend(lines_V + lines_I, labels_V + labels_I, fontsize='small')
except TypeError:
# work-around for older Matplotlib versions
Iax.legend(lines_V + lines_I, labels_V + labels_I)
'''
Note that I_KNa is not activated in this example because the neuron does
not spike. I_T has only a very small amplitude.
'''
| gpl-2.0 |
simon-pepin/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
alekz112/statsmodels | statsmodels/tsa/statespace/tests/test_representation.py | 6 | 19651 | """
Tests for representation module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
from statsmodels.tsa.statespace.representation import Representation
from statsmodels.tsa.statespace.model import Model
from .results import results_kalman_filter
from numpy.testing import assert_equal, assert_almost_equal, assert_raises, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Construct the statespace representation
k_states = 4
self.model = Model(data['lgdp'], k_states=k_states, **kwargs)
self.model.design[:, :, 0] = [1, 1, 0, 0]
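        # Fancy indexing: each (row, column, time-slice) triple below sets a
        # single element of the transition matrix.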
self.model.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters']
)
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
# Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[self.true['start']:].sum(),
self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.run_filter()
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.run_filter()
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.r_[self.model.endog[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.model.loglikelihood_burn = self.true['start']
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, **kwargs):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
k_states = 6
self.model = Model(data, k_states=k_states, **kwargs)
# Statespace representation
self.model.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
self.model.transition[
([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])
] = [1, 1, 0, 0, 1, 1, 1, 1]
self.model.selection = np.eye(self.model.k_states)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'],
)
self.model.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.model.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.model.obs_cov[1, 1, 0] = sigma_ec**2
self.model.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization
initial_state = np.zeros((k_states,))
initial_state_cov = np.eye(k_states)*100
        # Initialization: modification
initial_state_cov = np.dot(
np.dot(self.model.transition[:, :, 0], initial_state_cov),
self.model.transition[:, :, 0].T
)
self.model.initialize_known(initial_state, initial_state_cov)
def run_filter(self):
# Filter the data
self.results = self.model.filter()
def test_loglike(self):
assert_almost_equal(
# self.results.llf_obs[self.true['start']:].sum(),
self.results.llf_obs[0:].sum(),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
    Forecasting test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(
dtype=dtype, conserve_memory=conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self.model.endog = np.array(
np.c_[
self.model.endog,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.model.nobs = self.model.endog.shape[1]
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.results.filtered_state[0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
# self.model.loglikelihood_burn = self.true['start']
self.model.loglikelihood_burn = 0
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.results.llf_obs[0], self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.results.filtered_state[0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.results.filtered_state[1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.results.filtered_state[4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.results.filtered_state[5][-1],
self.true_states.iloc[end-1, 3], 4
)
# Miscellaneous coverage-related tests
def test_slice_notation():
endog = np.arange(10)*1.0
mod = Model(endog, k_states=2)
# Test invalid __setitem__
def set_designs():
mod['designs'] = 1
def set_designs2():
mod['designs',0,0] = 1
def set_designs3():
mod[0] = 1
assert_raises(IndexError, set_designs)
assert_raises(IndexError, set_designs2)
assert_raises(IndexError, set_designs3)
# Test invalid __getitem__
assert_raises(IndexError, lambda: mod['designs'])
assert_raises(IndexError, lambda: mod['designs',0,0,0])
assert_raises(IndexError, lambda: mod[0])
# Test valid __setitem__, __getitem__
assert_equal(mod.design[0,0,0], 0)
mod['design',0,0,0] = 1
assert_equal(mod['design'].sum(), 1)
assert_equal(mod.design[0,0,0], 1)
assert_equal(mod['design',0,0,0], 1)
# Test valid __setitem__, __getitem__ with unspecified time index
mod['design'] = np.zeros(mod['design'].shape)
assert_equal(mod.design[0,0], 0)
mod['design',0,0] = 1
assert_equal(mod.design[0,0], 1)
assert_equal(mod['design',0,0], 1)
def test_representation():
# Test an invalid number of states
def zero_kstates():
mod = Representation(1, 0)
assert_raises(ValueError, zero_kstates)
# Test an invalid endogenous array
def empty_endog():
endog = np.zeros((0,0))
mod = Representation(endog, k_states=2)
assert_raises(ValueError, empty_endog)
# Test a Fortran-ordered endogenous array (which will be assumed to be in
# wide format: k_endog x nobs)
nobs = 10
k_endog = 2
endog = np.asfortranarray(np.arange(nobs*k_endog).reshape(k_endog,nobs)*1.)
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test a C-ordered endogenous array (which will be assumed to be in
# tall format: nobs x k_endog)
nobs = 10
k_endog = 2
endog = np.arange(nobs*k_endog).reshape(nobs,k_endog)*1.
mod = Representation(endog, k_states=2)
assert_equal(mod.nobs, nobs)
assert_equal(mod.k_endog, k_endog)
# Test getting the statespace representation
assert_equal(mod._statespace, None)
mod._initialize_representation()
assert(mod._statespace is not None)
def test_bind():
mod = Representation(1, k_states=2)
# Test invalid endogenous array (it must be ndarray)
assert_raises(ValueError, lambda: mod.bind([1,2,3,4]))
# Test valid (nobs x 1) endogenous array
mod.bind(np.arange(10)*1.)
assert_equal(mod.nobs, 10)
# Test valid (k_endog x 0) endogenous array
mod.bind(np.zeros(0,dtype=np.float64))
# Test invalid (3-dim) endogenous array
assert_raises(ValueError, lambda: mod.bind(np.arange(12).reshape(2,2,3)*1.))
# Test valid F-contiguous
mod.bind(np.asfortranarray(np.arange(10).reshape(1,10)))
assert_equal(mod.nobs, 10)
# Test valid C-contiguous
mod.bind(np.arange(10).reshape(10,1))
assert_equal(mod.nobs, 10)
# Test invalid F-contiguous
assert_raises(ValueError, lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10,1))))
# Test invalid C-contiguous
assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1,10)))
def test_initialization():
mod = Representation(1, k_states=2)
# Test invalid state initialization
assert_raises(RuntimeError, lambda: mod._initialize_state())
# Test valid initialization
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(2) * 3.
mod.initialize_known(initial_state, initial_state_cov)
assert_equal(mod._initial_state.sum(), 3)
assert_equal(mod._initial_state_cov.diagonal().sum(), 6)
# Test invalid initial_state
initial_state = np.zeros(10,)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
initial_state = np.zeros((10,10))
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
# Test invalid initial_state_cov
initial_state = np.zeros(2,) + 1.5
initial_state_cov = np.eye(3)
assert_raises(ValueError, lambda: mod.initialize_known(initial_state, initial_state_cov))
| bsd-3-clause |
AIML/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
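        # Score 1.0 whenever foo_param > 1, so foo_param=2 and foo_param=3 tie
        # and the grid search is expected to pick the smaller value.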
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
philouc/pyhrf | python/pyhrf/validation/test_rndm_field.py | 1 | 12934 |
import os
import unittest
import numpy as _np
from pyhrf.jde.beta import *
from pyhrf.boldsynth.field import *
from pyhrf.graph import *
from pyhrf.tools import montecarlo
from pyhrf.validation import config
from pyhrf.validation.config import figfn
class field_energy_calculator:
def __init__(self, graph):
self.graph = graph
def __call__(self, labels):
hc = count_homo_cliques(self.graph, labels)
return -float(hc)/len(self.graph)
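# Note on the convention above: field_energy_calculator returns
# -n_homogeneous_cliques / n_sites, so lower values correspond to more ordered
# label fields; the Monte Carlo means computed in the tests below therefore
# decrease as beta increases.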
class PottsTest(unittest.TestCase):
def setUp(self):
self.plot = config.savePlots
self.outDir = os.path.join(config.plotSaveDir,
'./PottsPrior')
if self.plot and not os.path.exists(self.outDir):
os.makedirs(self.outDir)
self.verbose = False
def test_sw_nrj(self):
size = 100
shape = (int(size**.5), int(size**.5))
mask = _np.ones(shape, dtype=int) #full mask
g = graph_from_lattice(mask, kerMask=kerMask2D_4n)
nc = 2
betas = _np.arange(0, 1.4, .2)
mU = _np.zeros(len(betas))
vU = _np.zeros(len(betas))
nrjCalc = field_energy_calculator(g)
for ib, b in enumerate(betas):
#print 'MC for beta ', b
pottsGen = potts_generator(graph=g, beta=b, nbLabels=nc,
method='SW')
mU[ib], vU[ib] = montecarlo(pottsGen, nrjCalc, nbit=40)
if config.savePlots:
import matplotlib.pyplot as plt
plt.plot(betas, mU)
plt.errorbar(betas, mU, vU**.5)
plt.xlabel('beta')
plt.ylabel('mean U per site')
plt.show()
#print mU
#print vU
# assert max (d U(beta)) == 0.88
def test_SW_nrj(self):
size = 100
shape = (int(size**.5), int(size**.5))
mask = _np.ones(shape, dtype=int) #full mask
g = graph_from_lattice(mask, kerMask=kerMask2D_4n)
nc = 2
betas = _np.arange(0, 2.5, .2)
mU = _np.zeros(len(betas))
vU = _np.zeros(len(betas))
nrjCalc = field_energy_calculator(g)
for ib, b in enumerate(betas):
#print 'MC for beta ', b
pottsGen = potts_generator(graph=g, beta=b, nbLabels=nc,
method='SW')
mU[ib], vU[ib] = montecarlo(pottsGen, nrjCalc, nbit=5)
        import matplotlib.pyplot as plt
plt.plot(betas, mU,'b-')
plt.errorbar(betas, mU, vU**.5,fmt=None,ecolor='b')
for ib, b in enumerate(betas):
#print 'MC for beta ', b
pottsGen = potts_generator(graph=g, beta=b, nbLabels=3,
method='SW')
mU[ib], vU[ib] = montecarlo(pottsGen, nrjCalc, nbit=5)
if config.savePlots:
plt.plot(betas, mU,'r-')
plt.errorbar(betas, mU, vU**.5,fmt=None,ecolor='r')
plt.xlabel('beta')
plt.ylabel('mean U per site')
plt.xlim(betas[0]-.1,betas[-1]*1.05)
plt.show()
#print mU
#print vU
# assert max (d U(beta)) == 0.88
def test_SW_nrj_2C_3C(self):
size = 400
shape = (int(size**.5), int(size**.5))
mask = _np.ones(shape, dtype=int) #full mask
g = graph_from_lattice(mask, kerMask=kerMask2D_4n)
betas = _np.arange(0, 2.7, .2)
nitMC = 100
mU2C = _np.zeros(len(betas))
vU2C = _np.zeros(len(betas))
mU3C = _np.zeros(len(betas))
vU3C = _np.zeros(len(betas))
nrjCalc = field_energy_calculator(g)
#print "nbClasses = 2"
for ib, b in enumerate(betas):
#print ' MC for beta ', b
pottsGen = potts_generator(graph=g, beta=b, nbLabels=2,
method='SW')
mU2C[ib], vU2C[ib] = montecarlo(pottsGen, nrjCalc, nbit=nitMC)
#print ' mu2C=',mU2C
#print ' vU2C=',vU2C
#print "nbClasses = 3"
for ib, b in enumerate(betas):
#print ' MC for beta ', b
pottsGen = potts_generator(graph=g, beta=b, nbLabels=3,
method='SW')
mU3C[ib], vU3C[ib] = montecarlo(pottsGen, nrjCalc, nbit=nitMC)
#print ' mu3C=',mU3C
#print ' vU3C=',vU3C
if config.savePlots:
import matplotlib.pyplot as plt
plt.plot(betas, mU2C,'b-',label="2C")
plt.errorbar(betas, mU2C, vU2C**.5,fmt=None,ecolor='b')
plt.plot(betas, mU3C,'r-',label="3C")
plt.errorbar(betas, mU3C, vU3C**.5,fmt=None,ecolor='r')
plt.legend(loc='upper right')
plt.title('Mean energy in terms of beta \n for 2-color and 3-color Potts (SW sampling)')
plt.xlabel('beta')
plt.ylabel('mean U per site')
plt.xlim(betas[0]-.1,betas[-1]*1.05)
figFn = os.path.join(self.outDir, figfn('potts_energy_2C_3C'))
#print figFn
plt.savefig(figFn)
#plt.show()
# assert max (d U2C(beta)) == 0.88
def test_sw_sampling(self):
# assert proba(site) = 1/2
pass
def test_gibbs(self):
# plot nrj(beta)
# assert max (d U(beta)) == 0.88
pass
class PartitionFunctionTest(unittest.TestCase):
def setUp(self):
self.plot = config.savePlots
self.outDir = os.path.join(config.plotSaveDir,
'./PottsPartitionFunction')
if self.plot and not os.path.exists(self.outDir):
os.makedirs(self.outDir)
self.verbose = True
def test_onsager1(self):
size = 10000
beta = .3
pf = logpf_ising_onsager(size, beta)
assert _np.allclose(logpf_ising_onsager(size, 0.), _np.log(2)*size)
def test_onsager(self):
size = 900
dbeta = 0.001
beta = _np.arange(0., 2., dbeta)
pf = logpf_ising_onsager(size, beta)
dpf = _np.diff(pf)/dbeta
d1beta = beta[1:]
d2pf = _np.diff(dpf)/dbeta
d2beta = beta[2:]
if self.plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(beta, pf/size, label='logZ')
plt.plot(beta[1:], dpf/size, label='dlogZ')
plt.plot(beta[2:], d2pf/size, label='d2logZ')
plt.xlabel('beta')
plt.legend(loc='upper left')
plt.title('Log partition function per site and its derivatives' \
'.\nObtained with Onsager equations')
figFn = os.path.join(self.outDir, figfn('logPF_onsager'))
plt.savefig(figFn)
#plt.show()
#critical value:
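        # for the 2-colour Potts parameterisation used here, the critical
        # coupling is beta_c = log(1 + sqrt(2)) ~ 0.8814, hence the 0.88 below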
        if self.verbose:
            print 'critical beta:', d2beta[_np.argmax(d2pf)]
            print 'beta grid precision:', dbeta
assert _np.abs(d2beta[_np.argmax(d2pf)] - 0.88) <= 0.005
def test_path_sampling(self):
size = 900
shape = (int(size**.5), int(size**.5))
mask = _np.ones(shape, dtype=int) #full mask
g = graph_from_lattice(mask, kerMask=kerMask2D_4n)
pf, beta = Cpt_Vec_Estim_lnZ_Graph(g,2)
dbeta = beta[1]-beta[0]
dpf = _np.diff(pf)/dbeta
d1beta = beta[1:]
d2pf = _np.diff(dpf)/dbeta
d2beta = beta[2:]
if self.plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(beta, pf/size, label='logZ')
plt.plot(beta[1:], dpf/size, label='dlogZ')
plt.plot(beta[2:], d2pf/size, label='d2logZ')
plt.xlabel('beta')
plt.legend(loc='upper left')
plt.title('Log partition function per site and its derivatives' \
'.\nDiscretized using Path Sampling')
#print '##### ',
figFn = os.path.join(self.outDir, figfn('logPF_PS'))
plt.savefig(figFn)
#plt.show()
#critical value:
if self.verbose:
print 'critical beta:', d2beta[_np.argmax(d2pf)]
print 'beta grid precision:', dbeta
assert _np.abs(d2beta[_np.argmax(d2pf)] - 0.88) <= dbeta
def test_extrapolation(self):
size = 900
shape = (int(size**.5), int(size**.5))
mask = _np.ones(shape, dtype=int) #full mask
g = graph_from_lattice(mask, kerMask=kerMask2D_4n)
pf, beta = Cpt_Vec_Estim_lnZ_Graph_fast(g,2)
dbeta = beta[1]-beta[0]
dpf = _np.diff(pf)/dbeta
d1beta = beta[1:]
d2pf = _np.diff(dpf)/dbeta
d2beta = beta[2:]
if self.plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(beta, pf/size, label='logZ')
plt.plot(beta[1:], dpf/size, label='dlogZ')
plt.plot(beta[2:], d2pf/size, label='d2logZ')
plt.xlabel('beta')
plt.legend(loc='upper left')
plt.title('Log partition function per site and its derivatives' \
'.\nDiscretized using the Extrapolation Scheme.')
figFn = os.path.join(self.outDir, figfn('logPF_ES'))
plt.savefig(figFn)
#plt.show()
#critical value:
if self.verbose:
print 'critical beta:', d2beta[_np.argmax(d2pf)]
print 'beta grid precision:', dbeta
assert _np.abs(d2beta[_np.argmax(d2pf)] - 0.88) <= dbeta
def test_comparison(self):
size = 1000
shape = (int(size**.5), int(size**.5))
mask = _np.ones(shape, dtype=int) #full mask
g = graph_from_lattice(mask, kerMask=kerMask2D_4n,toroidal=True)
dbeta = 0.05
# ES
pfES, betaES = Cpt_Vec_Estim_lnZ_Graph_fast3(g,2,BetaStep=dbeta)
if self.verbose:
print 'betaES:', betaES
pfES = pfES[:-1]
if self.verbose:
print 'pfES:', len(pfES)
#print pfES
dpfES = _np.diff(pfES)/dbeta
#print 'dpfES:'
#print _np.diff(pfES)
d2pfES = _np.diff(dpfES)/dbeta
# Path Sampling
pfPS, beta = Cpt_Vec_Estim_lnZ_Graph(g,2,BetaStep=dbeta,SamplesNb=30)
if self.verbose:
print 'beta grid from PS:', beta
dpfPS = _np.diff(pfPS)/dbeta
d1beta = beta[1:]
d2pfPS = _np.diff(dpfPS)/dbeta
d2beta = beta[2:]
# Onsager
if self.verbose: print 'Onsager ...'
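        # the Onsager curve below is rescaled by an empirical factor of 0.96,
        # presumably to better match the finite toroidal lattice used here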
pfOns = logpf_ising_onsager(size, beta)*.96
dpfOns = _np.diff(pfOns)/dbeta
d2pfOns = _np.diff(dpfOns)/dbeta
if self.plot:
if self.verbose: print 'Plots ...'
import matplotlib.pyplot as plt
# PF plots
plt.figure()
plt.plot(beta, pfES, 'r-+', label='logZ-ES')
plt.plot(beta, pfPS, 'b', label='logZ-PS')
plt.plot(beta, pfOns,'g', label='logZ-Onsager')
#plt.xlabel('beta')
#plt.legend(loc='upper left')
#plt.title('Log partition function per site - comparison')
figFn = os.path.join(self.outDir, figfn('logPF_ES_PS_Ons'))
print 'saved:', figFn
plt.savefig(figFn)
plt.figure()
plt.plot(d1beta, dpfES/size, 'r-+', label='dlogZ-ES')
plt.plot(d1beta, dpfPS/size, 'b', label='dlogZ-PS')
plt.plot(d1beta, dpfOns/size,'g', label='dlogZ-Onsager')
plt.xlabel('beta')
plt.legend(loc='upper left')
plt.title('dLog partition function per site - comparison')
figFn = os.path.join(self.outDir, figfn('dlogPF_ES_PS_Ons'))
print 'saved:', figFn
plt.savefig(figFn)
plt.figure()
plt.plot(d2beta, d2pfES/size, 'r-+', label='d2logZ-ES')
plt.plot(d2beta, d2pfPS/size, 'b', label='d2logZ-PS')
plt.plot(d2beta, d2pfOns/size,'g', label='d2logZ-Onsager')
plt.xlabel('beta')
plt.legend(loc='upper left')
plt.title('d2Log partition function per site - comparison')
figFn = os.path.join(self.outDir, figfn('d2logPF_ES_PS_Ons'))
print 'saved:', figFn
plt.savefig(figFn)
plt.figure()
plt.plot(beta, _np.abs(pfES-pfOns)/size, 'r-+',
label='|logZ_ES-logZ-Ons|')
plt.plot(beta, _np.abs(pfPS-pfOns)/size, 'b',
label='|logZ_PS-logZ-Ons|')
plt.xlabel('beta')
plt.legend(loc='upper left')
plt.title('Error of Log partition function per site')
figFn = os.path.join(self.outDir, figfn('logPF_error_ES_PS'))
print 'saved:', figFn
plt.savefig(figFn)
#plt.show()
| gpl-3.0 |
gfyoung/pandas | pandas/tests/indexes/timedeltas/methods/test_astype.py | 3 | 4365 | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index,
Index,
Int64Index,
NaT,
Timedelta,
TimedeltaIndex,
timedelta_range,
)
import pandas._testing as tm
class TestTimedeltaIndex:
def test_astype_object(self):
idx = timedelta_range(start="1 days", periods=4, freq="D", name="idx")
expected_list = [
Timedelta("1 days"),
Timedelta("2 days"),
Timedelta("3 days"),
Timedelta("4 days"),
]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name="idx")
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex(
[timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx"
)
expected_list = [
Timedelta("1 days"),
Timedelta("2 days"),
NaT,
Timedelta("4 days"),
]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name="idx")
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN], name="idx")
result = idx.astype(object)
expected = Index(
[Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx"
)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = idx.astype(int)
expected = Int64Index(
[100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index([str(x) for x in idx], name="idx")
tm.assert_index_equal(result, expected)
rng = timedelta_range("1 days", periods=10)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = rng.astype("i8")
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_uint(self):
arr = timedelta_range("1H", periods=2)
expected = pd.UInt64Index(
np.array([3600000000000, 90000000000000], dtype="uint64")
)
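        # with no explicit freq, timedelta_range steps in days here, so the two
        # values are 1 hour and 1 day + 1 hour, expressed in nanoseconds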
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN])
result = idx.astype("timedelta64")
expected = Float64Index([1e14] + [np.NaN] * 3, dtype="float64")
tm.assert_index_equal(result, expected)
result = idx.astype("timedelta64[ns]")
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype("timedelta64[ns]", copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN])
msg = "Cannot cast TimedeltaArray to dtype"
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_category(self):
obj = timedelta_range("1H", periods=2, freq="H")
result = obj.astype("category")
expected = pd.CategoricalIndex([Timedelta("1H"), Timedelta("2H")])
tm.assert_index_equal(result, expected)
result = obj._data.astype("category")
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = timedelta_range("1H", periods=2)
result = obj.astype(bool)
expected = Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
miltondp/ukbrest | tests/utils.py | 1 | 1724 | import unittest
from os.path import dirname, abspath, join
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE
def get_repository_path(data_filename):
directory = dirname(abspath(__file__))
directory = join(directory, 'data/')
return join(directory, data_filename)
def get_full_path(filename):
root_dir = dirname(dirname(abspath(__file__)))
return join(root_dir, filename)
class DBTest(unittest.TestCase):
def setUp(self):
super(DBTest, self).setUp()
# wipe postgresql tables
sql_st = """
select 'drop table if exists "' || tablename || '" cascade;' from pg_tables where schemaname = 'public';
"""
db_engine = create_engine(POSTGRESQL_ENGINE)
tables = pd.read_sql(sql_st, db_engine)
with db_engine.connect() as con:
for idx, drop_table_st in tables.iterrows():
con.execute(drop_table_st.iloc[0])
def _get_table_contrains(self, table_name, column_query='%%', relationship_query='%%'):
return """
select t.relname as table_name, i.relname as index_name, a.attname as column_name
from pg_class t, pg_class i, pg_index ix, pg_attribute a
where
t.oid = ix.indrelid
and i.oid = ix.indexrelid
and a.attrelid = t.oid
and a.attnum = ANY(ix.indkey)
and t.relkind = 'r'
and t.relname = '{table_name}' and a.attname like '{column_query}' and i.relname like '{relationship_query}'
""".format(
table_name=table_name,
column_query=column_query,
relationship_query=relationship_query,
)
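    # Hypothetical usage sketch (not part of the original tests): a subclass
    # could check which columns of a table are covered by an index, assuming a
    # table named 'events' has already been loaded into the test database:
    #
    #   query = self._get_table_contrains('events', column_query='eid%%')
    #   constraints = pd.read_sql(query, create_engine(POSTGRESQL_ENGINE))
    #   assert not constraints.empty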
| gpl-3.0 |
FluidityProject/fluidity | tests/mms_rans_p2p1_keps/function_printer.py | 2 | 1111 | from mms_rans_p2p1_keps_tools import *
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
import sys
'''
run using:
python3 function_printer.py AA BB CC DD .. n_rows
where:
AA, BB, CC, DD are names of functions in mms_rans_p2p1_keps_tools.py (any number can be entered)
n_rows is the number of rows to display the functions on
'''
functions = []
for arg in sys.argv[1:-1]:
functions.append(arg)
n_rows = int(sys.argv[-1])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0.2, hspace=0.2)
res = 50
X = linspace(0.0, pi, res)
Y = linspace(0.0, pi, res)
x = [0,0]
data = empty([len(functions), res, res])
for z, function in enumerate(functions):
for j, x[0] in enumerate(X):
for i, x[1] in enumerate(Y):
data[z,i,j] = eval(function + '(x)')
    plt.subplot(n_rows, len(functions)//n_rows + 1, z+1)  # integer division: subplot indices must be ints under Python 3
CS = plt.contour(X, Y, data[z])
plt.clabel(CS, inline=1, fontsize=10)
plt.title(functions[z])
plt.show()
| lgpl-2.1 |
jkarnows/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
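# the scaling above makes norm(y, 2) / norm(noise_coef * noise, 2) equal to exp(snr / 20)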
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
rigdenlab/conkit | conkit/plot/contactdensity.py | 2 | 5851 | # BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A module to produce a domain boundary plot"""
from __future__ import division
from __future__ import print_function
__author__ = "Felix Simkovic"
__date__ = "23 Feb 2017"
__version__ = "0.1"
import matplotlib.pyplot as plt
import numpy as np
from conkit.misc import deprecate
from conkit.plot.figure import Figure
from conkit.plot.tools import ColorDefinitions
from conkit.plot.tools import find_minima
from conkit.plot.tools import _isinstance
class ContactDensityFigure(Figure):
"""A Figure object specifically for a contact density illustration.
This figure is an adaptation of the algorithm published by Sadowski
(2013) [#]_.
.. [#] Sadowski M. (2013). Prediction of protein domain boundaries
from inverse covariances. Proteins 81(2), 253-260.
Attributes
----------
hierarchy : :obj:`~conkit.core.contactmap.ContactMap`
The default contact map hierarchy
bw_method : str
The method to estimate the bandwidth
Examples
--------
>>> import conkit
>>> cmap = conkit.io.read('toxd/toxd.mat', 'ccmpred').top_map
>>> conkit.plot.ContactDensityFigure(cmap)
"""
def __init__(self, hierarchy, bw_method="bowman", **kwargs):
"""A new contact density plot
Parameters
----------
hierarchy : :obj:`~conkit.core.contactmap.ContactMap`
The default contact map hierarchy
bw_method : str, optional
The method to estimate the bandwidth [default: bowman]
**kwargs
General :obj:`~conkit.plot.figure.Figure` keyword arguments
"""
super(ContactDensityFigure, self).__init__(**kwargs)
self._bw_method = None
self._hierarchy = None
self.bw_method = bw_method
self.hierarchy = hierarchy
self.minima_ = None
self.draw()
def __repr__(self):
return self.__class__.__name__
@property
def bw_method(self):
"""The method to estimate the bandwidth
For a full list of options, please refer to
:meth:`~conkit.core.contactmap.ContactMap.get_contact_density`
"""
return self._bw_method
@bw_method.setter
def bw_method(self, bw_method):
"""Define the method to estimate the bandwidth"""
self._bw_method = bw_method
@property
def hierarchy(self):
"""A :obj:`~conkit.core.contactmap.ContactMap`"""
return self._hierarchy
@hierarchy.setter
def hierarchy(self, hierarchy):
"""Define the ConKit :obj:`ContactMap <conkit.core.contactmap.ContactMap>`
Raises
------
:exc:`TypeError`
The hierarchy is not a :obj:`~conkit.core.contactmap.ContactMap`
"""
if hierarchy and _isinstance(hierarchy, "ContactMap"):
self._hierarchy = hierarchy
else:
raise TypeError("The hierarchy is not an contact map")
@deprecate("0.11", msg="Use draw instead")
def redraw(self):
self.draw()
def draw(self):
x, y = self.get_xy_data()
self.ax.plot(x, y, linestyle="solid", color=ColorDefinitions.GENERAL, label="Contact Density", zorder=2)
line_kwargs = dict(linestyle="--", linewidth=1.0, alpha=0.5, color=ColorDefinitions.MISMATCH, zorder=1)
self.minima_ = []
for minimum in find_minima(y, order=1):
self.minima_.append(x[minimum])
self.ax.axvline(x[minimum], **line_kwargs)
self.ax.axvline(0, ymin=0, ymax=0, label="Domain Boundary", **line_kwargs)
self.ax.set_xlim(x.min(), x.max())
self.ax.set_ylim(0.0, y.max())
self.ax.set_xlabel("Residue number")
self.ax.set_ylabel("Density Estimate")
if self.legend:
self.ax.legend(
bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=3, mode="expand", borderaxespad=0.0, scatterpoints=1
)
# TODO: deprecate this in 0.10
if self._file_name:
self.savefig(self._file_name, dpi=self._dpi)
def get_xy_data(self):
residues = np.asarray(self.hierarchy.as_list()).flatten()
x = np.arange(residues.min(), residues.max() + 1)
y = np.asarray(self.hierarchy.get_contact_density(self.bw_method))
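        # x spans the residue numbering covered by the map; y is the kernel
        # density estimate of contact positions, whose local minima are marked
        # as candidate domain boundaries in draw()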
return x, y
| bsd-3-clause |
rahuldhote/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
  frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are only based
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |