repo_name | path | copies | size | content | license
---|---|---|---|---|---|
chrsrds/scikit-learn | sklearn/ensemble/weight_boosting.py | 1 | 41252 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import xlogy
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_classifier, is_regressor
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_array, check_random_state, check_X_y, safe_indexing
from ..utils.extmath import softmax
from ..utils.extmath import stable_cumsum
from ..metrics import accuracy_score, r2_score
from ..utils.validation import check_is_fitted
from ..utils.validation import has_fit_parameter
from ..utils.validation import _num_samples
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def _validate_data(self, X, y=None):
# Accept or convert to these sparse matrix formats so we can
# use safe_indexing
accept_sparse = ['csr', 'csc']
if y is None:
ret = check_array(X,
accept_sparse=accept_sparse,
ensure_2d=False,
allow_nd=True,
dtype=None)
else:
ret = check_X_y(X, y,
accept_sparse=accept_sparse,
ensure_2d=False,
allow_nd=True,
dtype=None,
y_numeric=is_regressor(self))
return ret
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
X, y = self._validate_data(X, y)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(_num_samples(X), dtype=np.float64)
sample_weight[:] = 1. / _num_samples(X)
else:
sample_weight = check_array(sample_weight, ensure_2d=False)
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
random_state = check_random_state(self.random_state)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight,
random_state)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
X = self._validate_data(X)
for y_pred in self.staged_predict(X):
if is_classifier(self):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
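# Illustrative sanity check of the transform above (comment only, not part of
# the original module): if the base estimator returns uniform probabilities,
# every log term equals its row mean, so the SAMME.R contribution is zero for
# every class, e.g. for K = 3:
#   proba = np.full((1, 3), 1. / 3)
#   log_proba = np.log(proba)
#   2 * (log_proba - log_proba.mean(axis=1, keepdims=True))  # -> [[0., 0., 0.]]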
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=None)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is ``DecisionTreeClassifier(max_depth=1)``
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The class labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
Examples
--------
>>> from sklearn.ensemble import AdaBoostClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)
AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.feature_importances_
array([0.28..., 0.42..., 0.14..., 0.16...])
>>> clf.predict([[0, 0, 0, 0]])
array([1])
>>> clf.score(X, y)
0.983...
See also
--------
AdaBoostRegressor, GradientBoostingClassifier,
sklearn.tree.DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super().fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super()._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight,
random_state)
def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
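# Illustrative example of the coding (comment only): with K = 3 classes and a
# sample whose true class is the second one, the coding row is
#   [-1/2, 1, -1/2]
# i.e. +1 in the true-class column and -1/(K - 1) everywhere else.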
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* ((n_classes - 1.) / n_classes)
* xlogy(y_coding, y_predict_proba).sum(axis=1))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
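# Illustrative numbers (comment only): with n_classes = 3 and
# estimator_error = 0.25 the unscaled weight is
#   log(0.75 / 0.25) + log(2) = log(3) + log(2) ~ 1.79,
# and it falls to zero as the error approaches the random-guessing level
# of 1 - 1 / n_classes.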
# Only boost the weights if I will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
X = self._validate_data(X)
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
X = self._validate_data(X)
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_data(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_data(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
@staticmethod
def _compute_proba_from_decision(decision, n_classes):
"""Compute probabilities from the decision function.
This is based on eq. (4) of [1] where:
p(y=c|X) = exp((1 / (K-1)) f_c(X)) / sum_k(exp((1 / (K-1)) f_k(X)))
= softmax((1 / (K-1)) * f(X))
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
2009.
"""
if n_classes == 2:
decision = np.vstack([-decision, decision]).T / 2
else:
decision /= (n_classes - 1)
return softmax(decision, copy=False)
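# Illustration of the binary branch above (comment only): a decision value d
# is mapped to softmax([-d / 2, d / 2]), so d = 0 gives [0.5, 0.5] and a large
# positive d pushes the probability of classes_[1] towards 1.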
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_data(X)
n_classes = self.n_classes_
if n_classes == 1:
return np.ones((_num_samples(X), 1))
decision = self.decision_function(X)
return self._compute_proba_from_decision(decision, n_classes)
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_data(X)
n_classes = self.n_classes_
for decision in self.staged_decision_function(X):
yield self._compute_proba_from_decision(decision, n_classes)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_data(X)
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=None)
The base estimator from which the boosted ensemble is built.
If ``None``, then the base estimator is
``DecisionTreeRegressor(max_depth=3)``.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
Examples
--------
>>> from sklearn.ensemble import AdaBoostRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
>>> regr.fit(X, y)
AdaBoostRegressor(n_estimators=100, random_state=0)
>>> regr.feature_importances_
array([0.2788..., 0.7109..., 0.0065..., 0.0036...])
>>> regr.predict([[0, 0, 0, 0]])
array([4.7972...])
>>> regr.score(X, y)
0.9771...
See also
--------
AdaBoostClassifier, GradientBoostingRegressor,
sklearn.tree.DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super().__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super().fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super()._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(random_state=random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = stable_cumsum(sample_weight)
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(_num_samples(X))
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
X_ = safe_indexing(X, bootstrap_idx)
y_ = safe_indexing(y, bootstrap_idx)
estimator.fit(X_, y_)
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
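# Illustrative numbers (comment only): with estimator_error = 0.2,
# beta = 0.2 / 0.8 = 0.25 and the estimator weight is
# learning_rate * log(4) ~ 1.39; samples fit perfectly (error_vect = 0)
# have their weight multiplied by beta = 0.25, while the worst samples
# (error_vect = 1) keep their weight unchanged.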
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]
# Return median predictions
return predictions[np.arange(_num_samples(X)), median_estimators]
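# Illustration of the weighted median above (comment only): with three
# estimators whose sorted predictions for one sample are [1.0, 2.0, 5.0]
# carrying (re-ordered) weights [0.2, 0.5, 0.3], the weight CDF is
# [0.2, 0.7, 1.0]; the first entry >= 0.5 * 1.0 is the second one, so the
# prediction 2.0 is returned for that sample.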
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_data(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_data(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
azariven/BioSig_SEAS | SEAS_Main/atmosphere_effects/depr_BioSig_Triage.py | 1 | 12061 | """
This is a test to interpolate the NIST spectra into the simulation format.
TODO:
- change C to X
- change "spectra" to something more general that covers Xsec, absorbance, transmittance
- move the interpolation to another module
"""
#from imports import *
import numpy as np
from scipy import interpolate
import os
import sys
import matplotlib.pyplot as plt
BoltK = 1.38*10**-23
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
import SEAS_Utils.common_utils.jdx_Reader as jdx
import SEAS_Utils.common_utils.db_management2 as dbm
'''
def HITRAN_CIA():
"""
steps of 25K from 200-3000
each files contain 9981 data point. resolution is 1 cm^-1
N2 contains 554... they are not the same.
"""
filename = "../../../input/CIA/H2-H2_2011.cia"
#print util.check_file_exist(filename)
nu,coef = [],[]
temp = []
t_nu, t_coef = [],[]
start = True
with open(filename,"r") as f:
result = f.read().split("\n")
for i in result:
if i != "":
try:
a,b = i.split()
t_nu.append(float(a))
t_coef.append(float(b))
except:
if start:
start = False
temp.append(i.split()[4])
else:
temp.append(i.split()[4])
nu.append(t_nu)
coef.append(t_coef)
t_nu,t_coef = [],[]
return nu,coef,temp
def Exomol_spectra():
filename = "../../../data/Exomol_xsec/PH3_300_test.txt"
#print util.check_file_exist(filename)
nu,coef = [],[]
with open(filename,"r") as f:
result = f.read().split("\n")
for i in result:
if i != "":
a,b = i.split()
nu.append(float(a))
coef.append(float(b))
return nu,coef
'''
def HITRAN_xsec():
filename = "../../input/absorption_data/HITRAN_Cross_Section/O3/O3_300.0_0.0_29164.0-40798.0_04.xsc"
nu,coef = [],[]
with open(filename,"r") as f:
result = f.read().split("\n")
for i in result[1:]:
if i != "":
for j in i.split():
coef.append(j)
numin = 29164.
numax = 40798.
wavmin = 10000./numax
wavmax = 10000./numin
npts = 5818
wav = np.linspace(wavmin,wavmax,npts)
nu = 10000./wav[::-1]
return nu,coef
"""
def NIST_spectra(molecule,return_param):
kwargs = {"db_name":"molecule_db.db",
"user":"azariven",
"dir":"/Users/mac/Workspace/BioSig/database",
"DEBUG":False,"REMOVE":False,"BACKUP":False,"OVERWRITE":False}
cross_db = dbm.database(**kwargs)
cross_db.access_db()
cmd = "SELECT inchikey From ID WHERE Formula='%s'"%molecule
result = cross_db.c.execute(cmd)
try:
fetch = result.fetchall()[0][0]
except:
print "Molecule Doesn't Exist in NIST Database"
sys.exit()
path = os.path.join("/Users/mac/Workspace/BioSig/data/NIST_data/",fetch)
filename = ""
for j in os.listdir(path):
if "jdx" in j:
filename = os.path.join(path,j)
data = jdx.JdxFile(filename)
if return_param[0] == "wl":
x = data.wl()
elif return_param[0] == "wn":
x = data.wn()
if return_param[1] == "T":
y = data.trans()
elif return_param[1] == "A":
y = data.absorb()
return x,y
"""
def HITRAN_spectra(molecule,spectra_param,return_param):
kwargs = {"db_name":"cross_section_Sparse.db",
"user":"azariven",
"dir":"/Users/mac/Workspace/BioSig/database",
"DEBUG":False,"REMOVE":False,"BACKUP":False,"OVERWRITE":False}
cross_db = dbm.database(**kwargs)
cross_db.access_db()
unit = 0.0001
Pref = 100000
Tref = 300
P = spectra_param[0]
T = spectra_param[1]
numin = spectra_param[2]
numax = spectra_param[3]
pathl = spectra_param[4]
result = cross_db.c.execute("SELECT nu, coef FROM {} WHERE P={} AND T={} AND nu>={} AND nu<{} ORDER BY nu".format(molecule,P,T,numin,numax))
fetch = np.array(result.fetchall()).T
nu, coef = fetch
n = Pref/(BoltK*Tref)
absorb = n*coef*pathl*unit
trans = np.exp(-absorb)
if return_param[0] == "wl":
x = 10000/nu[::-1]
if return_param[1] == "T":
y = trans[::-1]
elif return_param[1] == "A":
y = absorb[::-1]
elif return_param[1] == "C":
y = coef[::-1]
elif return_param[0] == "wn":
x = nu
if return_param[1] == "T":
y = trans
elif return_param[1] == "A":
y = absorb
elif return_param[1] == "C":
y = coef
return x,y
def find_nearest(array,value):
idx = np.searchsorted(array, value, side="left")
if idx > 0 and (idx == len(array) or np.fabs(value - array[idx-1]) < np.fabs(value - array[idx])):
return idx-1#array[idx-1]
else:
return idx#array[idx]
def test_interpolate(x1,y1,x2, type):
x1min = min(x1)
x1max = max(x1)
x2min = min(x2)
x2max = max(x2)
f = interpolate.interp1d(x1, y1)
try:
if x1min > x2min and x1max < x2max:
print "A"
left = find_nearest(x2,min(x1))+1
right = find_nearest(x2,max(x1))
if type == "A" or type == "C":
yinterp_left = np.zeros(left)
yinterp_right = np.zeros(len(x2)-right)
elif type == "T":
yinterp_left = np.ones(left)
yinterp_right = np.ones(len(x2)-right)
yinterp_middle = f(x2[left:right])
yinterp = np.concatenate([yinterp_left,yinterp_middle, yinterp_right])
elif x1min <= x2min and x1max < x2max:
print "B"
right = find_nearest(x2,max(x1))
if type == "A" or type == "C":
yinterp_right = np.zeros(len(x2)-right)
elif type == "T":
yinterp_right = np.ones(len(x2)-right)
yinterp_middle = f(x2[:right])
yinterp = np.concatenate([yinterp_middle, yinterp_right])
elif x1min > x2min and x1max >= x2max:
print "C"
left = find_nearest(x2,min(x1))+1
if type == "A" or type == "C":
yinterp_left = np.zeros(left)
elif type == "T":
yinterp_left = np.ones(left)
yinterp_middle = f(x2[left:])
yinterp = np.concatenate([yinterp_left,yinterp_middle])
else:
print "D"
yinterp = f(x2)
except:
print(x1min, x1max)
print(x2min, x2max)
sys.exit()
return yinterp
def NIST_All_Spectra(x2):
kwargs = {"db_name":"molecule_db.db",
"user":"azariven",
"dir":"/Users/mac/Workspace/BioSig/database",
"DEBUG":False,"REMOVE":False,"BACKUP":False,"OVERWRITE":False}
cross_db = dbm.database(**kwargs)
cross_db.access_db()
cmd = 'SELECT ID.smiles, ID.inchikey FROM ID,Spectra \
WHERE ID.inchikey=Spectra.inchikey AND Spectra.has_spectra="Y"'
result = cross_db.c.execute(cmd)
data = np.array(result.fetchall()).T
smiles = data[0]
spectras = data[1]
stuff = []
for i,spectra in enumerate(spectras):
path = os.path.join("/Users/mac/Workspace/BioSig/data/NIST_data/",spectra)
filename = ""
for j in os.listdir(path):
if "jdx" in j:
filename = os.path.join(path,j)
break
data = jdx.JdxFile(filename)
x = data.wn()
y = data.absorb()
yinterp = test_interpolate(x,y,x2,"A")
stuff.append(yinterp)
"""
plt.plot(x,y)
if i>10:
break
"""
"""
ax = plt.gca()
ax.set_xscale('log')
plt.tick_params(axis='x', which='minor')
ax.xaxis.set_minor_formatter(FormatStrFormatter("%.1f"))
plt.show()
"""
print(len(stuff))
return smiles, stuff
'''
def temperature_scaling(sigma, T_0, T):
return sigma*np.sqrt(T_0/T)
def spectra_differences():
P = 10000
T = 300.
numin = 400
numax = 30000
pathl = 0.002
for pathl in []:
molecule = "C2H2"
bins = 1000
x1,y1 = NIST_spectra(molecule,["wn","T"])
x2,y2 = HITRAN_spectra(molecule,[P,T,numin,numax,pathl],["wn","T"])
yinterp = test_interpolate(x1,y1,x2,"T")
dp = len(x2)
slices = np.linspace(0, dp, bins+1, True).astype(np.int)
counts = np.diff(slices)
mean_x_1 = np.add.reduceat(x2, slices[:-1]) / counts
mean_y_1 = np.add.reduceat(yinterp, slices[:-1]) / counts
dp = len(x2)
slices = np.linspace(0, dp, bins+1, True).astype(np.int)
counts = np.diff(slices)
mean_y_2 = np.add.reduceat(y2, slices[:-1]) / counts
plt.plot(mean_x_1,mean_y_2,"r")
plt.plot(mean_x_1,mean_y_1,"b")
plt.plot(mean_x_1,mean_y_2-mean_y_1,"g")
print pathl, sum(mean_y_2-mean_y_1)/len(mean_y_2)
'''
if __name__ == "__main__":
"""
Pref = 10000.
Tref = 300.
nref = Pref/(BoltK*Tref)
lref = 0.05
unit = 10000
numin = 400
numax = 30000
pathl = 1000000
molecule = "H2"
x1,y1 = HITRAN_xsec()
x2,y2 = HITRAN_spectra(molecule,[100000,300,numin,numax,pathl],["wn","T"])
yinterp = test_interpolate(x1,y1,x2,"C")
plt.plot(10000./x1,y1)
plt.plot(10000./x2,yinterp)
plt.show()
sys.exit()
"""
"""
Pref = 10000.
Tref = 300.
nref = Pref/(BoltK*Tref)
lref = 0.05
unit = 10000
numin = 400
numax = 30000
pathl = 1000000
molecule = "H2"
a,b = HITRAN_spectra(molecule,[100000,300,numin,numax,pathl],["wl","T"])
plt.plot(a,b)
plt.show()
sys.exit()
x1,y1 = NIST_spectra(molecule,["wn","T"])
x2,y2 = HITRAN_spectra(molecule,[100000,300,numin,numax,pathl],["wn","T"])
y1 = np.array(y1)+(1-(np.mean(y1)+np.median(y1))/2)
y1new = []
for i in y1:
if i > 1:
y1new.append(1)
else:
y1new.append(i)
y1 = y1new
yinterp = test_interpolate(x1,y1,x2,"T")
sigma = -np.log(yinterp)/(nref*lref)*unit
"""
"""
for P in [100000,10000,1000,100]:
for T in [250,300,350]:
n = P/(BoltK*T)
y = np.exp(-n*sigma*lref*0.0001)
plt.plot(x2,y)
"""
"""
x2,sigma,temp = HITRAN_CIA()
#plt.plot(x1,y1)
#plt.plot(10000./x2,yinterp)
for i in range(10):
plt.plot(x2[0],sigma[i],label=temp[i])
plt.title("H2 CIA")
plt.xlabel("wavenumber")
plt.ylabel("intensity")
plt.legend()
plt.show()
"""
"""
x1,y1 = HITRAN_spectra(molecule,[100000,300,numin,numax,pathl],["wl","T"])
x2,y2 = HITRAN_spectra(molecule,[10000,300,numin,numax,pathl],["wl","T"])
x3,y3 = HITRAN_spectra(molecule,[1000,300,numin,numax,pathl],["wl","T"])
#yinterp = test_interpolate(x1,y1,x2,"T")
plt.plot(x1,y1,"r")
plt.plot(x2,y2,"g")
plt.plot(x3,y3,"b")
plt.show()
"""
"""
x1,y1 = NIST_spectra("C2H2")
x2,y2 = HITRAN_spectra("CO")
#NIST_All_Spectra(x2)
"""
| gpl-3.0 |
laurajchang/NPTFit | NPTFit/dnds_analysis.py | 1 | 17056 | ###############################################################################
# dnds_analysis.py
###############################################################################
#
# Analyze results of a non-Poissonian template fit. Code to produce:
#
# - Template intensities and confidence intervals
# - Source count distributions
# - Intensity fractions
# - Triangle plots and log-evidences
#
# NB: the code defaults to assuming the analysis is done on a HEALPix map;
# if this is not the case, a pixel area (pixarea) must be supplied at initialization.
#
###############################################################################
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
import corner
class Analysis:
""" Class to analyze results of an NPTF.
:param nptf: an instance of nptfit.NPTF, where load_scan() has been performed
:param mask: if analysis is to be performed in a different ROI to the run, insert
the analysis mask here
:param pixarea: if using a non-HEALPix map, insert the area of a pixel (in sr)
"""
def __init__(self, nptf, mask=None, pixarea=0.):
self.nptf = nptf
# Default to HEALPix map
if pixarea == 0.:
self.pixarea = 4*np.pi/self.nptf.npix
else:
self.pixarea = pixarea
if mask is None:
self.mask_total = self.nptf.mask_total
else:
self.mask_total = mask
self.mask_compress_data()
def return_intensity_arrays_non_poiss(self, comp, smin=0.01, smax=10000,
nsteps=10000, counts=False):
""" Return intensity quantiles of a non-Poissonian template
:param comp: key of non-Poissonian template
:param smin: minimum count to "integrate" dnds from
:param smax: maximum count to "integrate" dnds to
:param nsteps: number of count bins in sum approximation of integral
:param counts: whether to return counts (or intensities, by default)
"""
template = self.nptf.templates_dict_nested[comp]['template']
template_masked_compressed = self.mask_and_compress(
template, self.mask_total)
# If intensity, convert from counts to counts/cm^2/s/sr
if counts:
self.template_sum = np.sum(template_masked_compressed)
else:
self.template_sum = np.mean(template_masked_compressed /
self.exp_masked_compressed /
self.pixarea)
self.sarray = 10**np.linspace(np.log10(smin), np.log10(smax), nsteps)
self.ds = [self.sarray[i+1]-self.sarray[i]
for i in range(len(self.sarray)-1)]
self.ds = np.array(self.ds + [self.ds[-1]])
# Get NPT intensity arrays. These are calculated as
# \int(dS*S*dN/dS). Note that the APS parameter is a
# rescaling of the counts, which is why to get the
# intensity this is multiplied by the total counts
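# In code the integral is approximated by a Riemann sum over the
# logarithmically spaced grid self.sarray (comment added for clarity):
#   I ~ template_sum * sum_i dN/dS(S_i) * S_i * dS_i
# which is exactly what the map below evaluates for each posterior sample.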
self.intensity_array_non_poiss = \
list(map(lambda sample:
np.sum(self.template_sum *
self.dnds(comp, sample, self.sarray) *
self.sarray*self.ds), self.nptf.samples))
return self.intensity_array_non_poiss
def return_intensity_arrays_poiss(self, comp, counts=False):
""" Return intensity arrays of a Poissonian template
:param comp: key of Poissonian template
:param counts: whether to return counts (or intensities, by default)
"""
template = self.nptf.templates_dict_nested[comp]['template']
template_masked_compressed = self.mask_and_compress(
template, self.mask_total)
# If intensity, convert from counts to counts/cm^2/s/sr
if counts:
self.template_sum = np.sum(template_masked_compressed)
else:
self.template_sum = np.mean(template_masked_compressed /
self.exp_masked_compressed /
self.pixarea)
# Get PT intensities by scaling the compressed mask intensity
# by the relevant normalizations from chains
self.intensity_array_poiss = \
list(map(lambda sample: self.template_sum *
self.return_poiss_samples(comp, sample),
self.nptf.samples))
return self.intensity_array_poiss
def return_non_poiss_samples(self, comp, sample):
""" Return non-Poissonian samples corrected for log priors
"""
# Load all NPT models (stored after PT models)
self.model_decompression_non_poiss = \
np.array(self.nptf.model_decompression_key[self.nptf.n_poiss:])
model_where = \
np.where(self.model_decompression_non_poiss[:, 0] == comp)[0] \
+ self.nptf.n_poiss
is_log_prior = \
np.array(self.nptf.model_decompression_key)[model_where][:, 1]
is_log_prior = list(is_log_prior == 'True')
samples_model_not_log = \
self.log_to_normal(np.array(sample)[model_where], is_log_prior)
return samples_model_not_log
def return_poiss_samples(self, comp, sample):
""" Return Poissonian samples corrected for log priors
"""
# Load all PT models
self.model_decompression_poiss = \
np.array(self.nptf.model_decompression_key[:self.nptf.n_poiss])
model_where = \
np.where(self.model_decompression_poiss[:, 0] == comp)[0]
is_log_prior = \
np.array(self.model_decompression_poiss)[model_where][0][1]
samples_model_not_log = self.log_to_normal(
np.array(sample)[model_where], [is_log_prior == 'True'])[0]
return samples_model_not_log
def return_dndf_arrays(self, comp, flux):
""" Calcualte and return array of dN/dF values for the template comp
and the given array of flux values (in counts/cm^2/s)
"""
template = self.nptf.templates_dict_nested[comp]['template']
template_masked_compressed = \
self.mask_and_compress(template, self.mask_total)
self.template_sum = np.sum(template_masked_compressed)
# Rescaling factor to convert dN/dS to [(counts/cm^2 /s)^-2 /deg^2]
# Note that self.area_mask has units deg^2.
rf = self.template_sum*self.exp_masked_mean/self.area_mask
# Get counts from flux
s = np.array([flux])*self.exp_masked_mean
return rf*np.array([self.dnds(comp, sample, s)[0]
for sample in self.nptf.samples])
def calculate_dndf_arrays(self, comp, smin=0.01, smax=1000, nsteps=1000,
qs=[0.16, 0.5, 0.84]):
""" Calculate dnds for specified quantiles
"""
template = self.nptf.templates_dict_nested[comp]['template']
template_masked_compressed = \
self.mask_and_compress(template, self.mask_total)
self.template_sum = np.sum(template_masked_compressed)
self.sarray = 10**np.linspace(np.log10(smin), np.log10(smax), nsteps)
self.flux_array = self.sarray/self.exp_masked_mean
self.data_array = np.array([self.dnds(comp, sample, self.sarray)
for sample in self.nptf.samples])
# Rescaling factor to convert dN/dS to [(ph /cm^2 /s)^-2 /deg^2]
# Note that self.area_mask has units deg^2.
rf = self.template_sum*self.exp_masked_mean/self.area_mask
self.qArray = [corner.quantile(self.data_array[::, i], qs)
for i in range(len(self.sarray))]
self.qmean = rf*np.array([np.mean(self.data_array[::, i])
for i in range(len(self.sarray))])
self.qlow = rf*np.array([q[0] for q in self.qArray])
self.qmid = rf*np.array([q[1] for q in self.qArray])
self.qhigh = rf*np.array([q[2] for q in self.qArray])
def plot_source_count_band(self, comp, smin=0.01, smax=1000, nsteps=1000,
qs=[0.16, 0.5, 0.84], spow=0, *args, **kwargs):
""" Calculate and plot median source count function
:param comp: key of non-Poissonian template
:param smin: minimum count to plot
:param smax: maximum count to plot
:param nsteps: binning in counts s
:param qs: source count quantiles to plot
:param spow: plotting s**spow*dn/ds
**kwargs: plotting options
"""
self.calculate_dndf_arrays(comp, smin=smin, smax=smax,
nsteps=nsteps, qs=qs)
plt.fill_between(self.flux_array, self.flux_array**spow*self.qlow,
self.flux_array**spow*self.qhigh, *args, **kwargs)
def plot_source_count_median(self, comp, smin=0.01, smax=1000, nsteps=1000,
spow=0, qs=[0.16, 0.5, 0.84], *args, **kwargs):
""" Calculate and plot median source count function
"""
self.calculate_dndf_arrays(comp, smin=smin, smax=smax,
nsteps=nsteps, qs=qs)
plt.plot(self.flux_array, self.flux_array**spow*self.qmid,
*args, **kwargs)
def plot_intensity_fraction_non_poiss(self, comp, smin=0.00001, smax=1000,
nsteps=1000, qs=[0.16, 0.5, 0.84],
bins=50, color='blue',
ls_vert='dashed', *args, **kwargs):
""" Plot flux fraction of a non-Poissonian template
:param bins: flux fraction bins
:param color: colour of the curve and vertical quantile lines
:param ls_vert: matplotlib linestyle of vertical quartile lines
**kwargs: plotting options
"""
flux_fraction_array_non_poiss = \
np.array(self.return_intensity_arrays_non_poiss(comp, smin=smin,
smax=smax, nsteps=nsteps, counts=True))/self.total_counts
frac_hist_comp, bin_edges_comp = \
np.histogram(100*np.array(flux_fraction_array_non_poiss), bins=bins,
range=(0, 100))
qs_comp = \
corner.quantile(100*np.array(flux_fraction_array_non_poiss), qs)
plt.plot(bin_edges_comp[:-1],
frac_hist_comp/float(sum(frac_hist_comp)),
color=color, *args, **kwargs)
for q in qs_comp:
plt.axvline(q, ls=ls_vert, color=color)
self.qs_comp = qs_comp
def plot_intensity_fraction_poiss(self, comp, qs=[0.16, 0.5, 0.84], bins=50,
color='blue', ls_vert='dashed',
*args, **kwargs):
""" Plot flux fraction of non-Poissonian component
"""
flux_fraction_array_poiss = \
np.array(self.return_intensity_arrays_poiss(comp, counts=True))\
/ self.total_counts
frac_hist_comp, bin_edges_comp = \
np.histogram(100*np.array(flux_fraction_array_poiss), bins=bins,
range=(0, 100))
qs_comp = corner.quantile(100*np.array(flux_fraction_array_poiss), qs)
plt.plot(bin_edges_comp[:-1],
frac_hist_comp/float(sum(frac_hist_comp)),
color=color, *args, **kwargs)
for q in qs_comp:
plt.axvline(q, ls=ls_vert, color=color)
self.qs_comp = qs_comp
def return_poiss_parameter_posteriors(self, comp):
""" Return posterior samples corresponding to individual parameters.
:return: sample normalization values
"""
self.samples_reduced_ary = [self.return_poiss_samples(comp, sample)
for sample in self.nptf.samples]
return self.samples_reduced_ary
def return_non_poiss_parameter_posteriors(self, comp):
""" Return posterior samples corresponding to individual parameters.
:return: sampled non-Poissonian posterior values, a list with
self.aps_ary: sample normalization values
self.n_ary_ary: sampled slopes, each sub-array corresponding to
samples for that slope (highest to lowest).
self.sb_ary_ary: sampled breaks, each sub-array corresponding to
samples for that break (highest to lowest).
"""
self.samples_reduced_ary = [self.return_non_poiss_samples(comp, sample)
for sample in self.nptf.samples]
self.samples_reduced_param_ary = list(zip(*self.samples_reduced_ary))
nbreak = int((len(self.samples_reduced_ary[0]) - 2)/2.)
self.aps_ary = self.samples_reduced_param_ary[0]
self.n_ary_ary = [[] for _ in range(nbreak+1)]
self.sb_ary_ary = [[] for _ in range(nbreak)]
for i in range(nbreak+1):
self.n_ary_ary[i] = self.samples_reduced_param_ary[i+1]
for i in range(nbreak):
self.sb_ary_ary[i] = self.samples_reduced_param_ary[i+nbreak+2]
return self.aps_ary, self.n_ary_ary, self.sb_ary_ary
def dnds(self, comp, sample, s):
""" dN/dS values for NPT comp associated with a chain sample
"""
samples_reduced = self.return_non_poiss_samples(comp, sample)
nbreak = int((len(samples_reduced) - 2)/2.)
# Get APS (float) and slopes/breaks (arrays)
a_ps, n_ary, sb_ary = samples_reduced[0], samples_reduced[1:nbreak+2], \
samples_reduced[nbreak+2:]
# If relative breaks, define each break as (except the last one)
# the multiplicative factor times the previous break
if self.nptf.non_poiss_models[comp]['dnds_model'] \
== 'specify_relative_breaks':
for i in reversed(range(len(sb_ary) - 1)):
sb_ary[i] = sb_ary[i+1]*sb_ary[i]
# Determine where the s values fall with respect to the breaks
where_vecs = [[] for _ in range(nbreak+1)]
where_vecs[0] = np.where(s >= sb_ary[0])[0]
for i in range(1, nbreak):
where_vecs[i] = np.where((s >= sb_ary[i]) & (s < sb_ary[i-1]))[0]
where_vecs[-1] = np.where(s < sb_ary[-1])[0]
# Calculate dnds values for a broken power law with arbitrary breaks
dnds = np.zeros(len(s))
dnds[where_vecs[0]] = a_ps*(s[where_vecs[0]]/sb_ary[0])**(-n_ary[0])
dnds[where_vecs[1]] = a_ps*(s[where_vecs[1]]/sb_ary[0])**(-n_ary[1])
for i in range(2, nbreak+1):
dnds[where_vecs[i]] = \
a_ps*np.prod([(sb_ary[j+1]/sb_ary[j])**(-n_ary[j+1])
for j in range(0, i-1)]) * \
(s[where_vecs[i]]/sb_ary[i-1])**(-n_ary[i])
return dnds
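# Illustration of the broken power law evaluated above (comment only): for a
# single break (nbreak = 1) with normalization A, break S_b, index n1 above
# the break and n2 below it,
#   dN/dS = A * (S / S_b)**(-n1)   for S >= S_b
#   dN/dS = A * (S / S_b)**(-n2)   for S <  S_b
# Additional breaks continue the power law piecewise, matched at each break.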
@staticmethod
def log_to_normal(array, is_log):
""" Take array and account for the impact of log priors
"""
array_normal = []
for i in range(len(array)):
if is_log[i]:
array_normal.append(10**array[i])
else:
array_normal.append(array[i])
return array_normal
@staticmethod
def mask_and_compress(the_map, mask):
""" Return compressed version of a map
"""
map_masked = ma.masked_array(data=the_map, mask=mask)
return map_masked.compressed()
def mask_compress_data(self):
""" Adjust the data and exposure for the mask
"""
self.data_masked = ma.masked_array(data=self.nptf.count_map,
mask=self.mask_total)
self.data_masked_compressed = self.data_masked.compressed()
self.total_counts = float(np.sum(self.data_masked_compressed))
self.area_mask = np.sum(1-self.mask_total)*self.pixarea *\
(360/(2.*np.pi))**2
self.exp_masked = ma.masked_array(data=self.nptf.exposure_map,
mask=self.mask_total)
self.exp_masked_compressed = self.exp_masked.compressed()
self.exp_masked_mean = np.mean(self.exp_masked_compressed)
def get_log_evidence(self):
""" Global log-evidence and associated error
"""
self.lge = self.nptf.s['nested sampling global log-evidence']
self.lge_err = self.nptf.s['nested sampling global log-evidence error']
return self.lge, self.lge_err
def make_triangle(self):
""" Make a triangle plot
"""
corner.corner(self.nptf.samples, labels=self.nptf.params, smooth=1.5,
smooth1d=1, quantiles=[0.16, 0.5, 0.84], show_titles=True,
title_fmt='.2f', title_args={'fontsize': 14},
range=[1 for _ in range(self.nptf.n_params)],
plot_datapoints=False, verbose=False)
| mit |
anhaidgroup/py_stringsimjoin | benchmarks/asv_benchmarks/data_generator.py | 1 | 1357 | """Utilities to generate synthetic data"""
import random
import string
import pandas as pd
def generate_tokens(mean, std_dev, num_tokens):
tokens = {}
cnt = 0
while cnt < num_tokens:
length = int(round(random.normalvariate(mean,
std_dev)))
if length < 2:
continue
flag = True
while flag:
new_token = ''.join(random.choice(string.ascii_lowercase)
for i in range(length))
if tokens.get(new_token) is None:
tokens[new_token] = True
flag = False
cnt += 1
return list(tokens.keys())
def generate_table(mean, std_dev, tokens, num_records,
id_col_name, attr_col_name):
records = []
cnt = 0
num_tokens = len(tokens)
while cnt < num_records:
size = int(round(random.normalvariate(mean,
std_dev)))
new_string = ''
for i in range(size):
rand = random.randint(0, num_tokens - 1)
if i == 0:
new_string += tokens[rand]
else:
new_string += ' ' + tokens[rand]
records.append([cnt, new_string])
cnt += 1
return pd.DataFrame(records, columns=[id_col_name, attr_col_name])
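# A minimal usage sketch (not part of the original module; the parameter
# values below are illustrative): build a vocabulary of random tokens, then
# generate two tables that could be fed to a string-similarity join.
if __name__ == '__main__':
    vocab = generate_tokens(mean=5, std_dev=1, num_tokens=500)
    ltable = generate_table(mean=4, std_dev=1, tokens=vocab, num_records=1000,
                            id_col_name='l_id', attr_col_name='l_attr')
    rtable = generate_table(mean=4, std_dev=1, tokens=vocab, num_records=1000,
                            id_col_name='r_id', attr_col_name='r_attr')
    print(ltable.head())
    print(rtable.head())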
| bsd-3-clause |
dmitriz/zipline | zipline/data/loader.py | 1 | 16153 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas.io.data import DataReader
import pytz
from six import iteritems
from . benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from .paths import (
cache_root,
data_root,
)
from zipline.utils.tradingcalendar import (
trading_day as trading_day_nyse,
trading_days as trading_days_nyse,
)
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
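# Hedged sketch (not part of the original module): the redownload throttle used
# by the ensure_* functions below boils down to this single comparison.
def _cache_is_fresh(path, now):
    """Return True when `path` was written less than ONE_HOUR before `now`."""
    return (now - last_modified_time(path)) <= ONE_HOUR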
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
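# Hedged usage sketch for has_data_for_dates (dates are arbitrary examples;
# this demo helper is not part of the original module).
def _demo_has_data_for_dates():
    idx = pd.date_range('2014-01-02', '2014-12-31', tz='UTC')
    series = pd.Series(0.0, index=idx)
    # covered on both ends -> True
    covered = has_data_for_dates(series,
                                 pd.Timestamp('2014-02-03', tz='UTC'),
                                 pd.Timestamp('2014-11-03', tz='UTC'))
    # requested start precedes the first cached date -> False
    missing = has_data_for_dates(series,
                                 pd.Timestamp('2013-06-02', tz='UTC'),
                                 pd.Timestamp('2014-11-03', tz='UTC'))
    return covered, missing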
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse,
bm_symbol='^GSPC'):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '^GSPC', the Yahoo
ticker for the S&P 500.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
benchmark_returns = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
treasury_curves = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
)
return benchmark_returns, treasury_curves
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a file
# in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark "
"data because a download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
data = get_benchmark_returns(symbol, first_date - trading_day, last_date)
data.to_csv(path)
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a file
# in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury "
"data because a download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
    start : datetime (Default: datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices from start date on.
end : datetime (Default: datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
print(stock)
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
print(name)
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
| apache-2.0 |
adamgreenhall/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
muku42/seaborn | examples/pairgrid_dotplot.py | 27 | 1056 | """
Dot plot with several variables
===============================
_thumb: .3, .3
"""
import seaborn as sns
sns.set(style="whitegrid")
# Load the dataset
crashes = sns.load_dataset("car_crashes")
# Make the PairGrid
g = sns.PairGrid(crashes.sort("total", ascending=False),
x_vars=crashes.columns[:-3], y_vars=["abbrev"],
size=10, aspect=.25)
# Draw a dot plot using the stripplot function
g.map(sns.stripplot, size=10, orient="h",
palette="Reds_r", edgecolor="gray")
# Use the same x axis limits on all columns and add better labels
g.set(xlim=(0, 25), xlabel="Crashes", ylabel="")
# Use semantically meaningful titles for the columns
titles = ["Total crashes", "Speeding crashes", "Alcohol crashes",
"Not distracted crashes", "No previous crashes"]
for ax, title in zip(g.axes.flat, titles):
# Set a different title for each axes
ax.set(title=title)
# Make the grid horizontal instead of vertical
ax.xaxis.grid(False)
ax.yaxis.grid(True)
sns.despine(left=True, bottom=True)
| bsd-3-clause |
sjperkins/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 42 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
      # Download the file, then open the freshly written copy.
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
      # Download first, then open the freshly written file.
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
    feature_cols = dict(list(continuous_cols.items()) +
                        list(categorical_cols.items()))
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
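# Hedged pandas-only illustration of the ">50K" label construction used in
# CensusDataSource above (synthetic rows; not part of the original script).
def _demo_income_label():
  import pandas as pd  # local import keeps the demo self-contained
  df = pd.DataFrame({"income_bracket": ["<=50K", ">50K.", ">50K"]})
  income_thresh = lambda x: ">50K" in x
  return df["income_bracket"].apply(income_thresh).astype(int)  # -> 0, 1, 1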
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
hmendozap/auto-sklearn | test/test_pipeline/components/classification/test_passive_aggressive.py | 1 | 3720 | import unittest
from autosklearn.pipeline.components.classification.passive_aggressive import \
PassiveAggressive
from autosklearn.pipeline.util import _test_classifier, \
_test_classifier_iterative_fit, _test_classifier_predict_proba
import numpy as np
import sklearn.metrics
import sklearn.linear_model
class PassiveAggressiveComponentTest(unittest.TestCase):
def test_default_configuration(self):
for i in range(10):
predictions, targets = _test_classifier(PassiveAggressive)
self.assertAlmostEqual(0.76000000000000001,
sklearn.metrics.accuracy_score(predictions,
targets))
def test_default_configuration_iterative_fit(self):
for i in range(10):
predictions, targets = _test_classifier_iterative_fit(
PassiveAggressive)
self.assertAlmostEqual(0.68000000000000005,
sklearn.metrics.accuracy_score(
predictions, targets))
def test_default_configuration_digits(self):
for i in range(10):
predictions, targets = \
_test_classifier(classifier=PassiveAggressive, dataset='digits')
self.assertAlmostEqual(0.90710382513661203,
sklearn.metrics.accuracy_score(predictions,
targets))
def test_default_configuration_digits_iterative_fit(self):
for i in range(10):
predictions, targets = _test_classifier_iterative_fit(classifier=PassiveAggressive,
dataset='digits')
self.assertAlmostEqual(0.91317547055251969,
sklearn.metrics.accuracy_score(
predictions, targets))
def test_default_configuration_binary(self):
for i in range(10):
predictions, targets = _test_classifier(PassiveAggressive,
make_binary=True)
self.assertAlmostEqual(1.0,
sklearn.metrics.accuracy_score(predictions,
targets))
def test_default_configuration_multilabel(self):
for i in range(10):
predictions, targets = \
_test_classifier(classifier=PassiveAggressive,
dataset='digits',
make_multilabel=True)
self.assertAlmostEqual(0.8975269956947447,
sklearn.metrics.average_precision_score(
targets, predictions))
def test_default_configuration_multilabel_predict_proba(self):
for i in range(10):
predictions, targets = \
_test_classifier_predict_proba(classifier=PassiveAggressive,
make_multilabel=True)
self.assertEqual(predictions.shape, ((50, 3)))
self.assertAlmostEqual(0.99703892466326138,
sklearn.metrics.average_precision_score(
targets, predictions))
def test_target_algorithm_multioutput_multiclass_support(self):
cls = sklearn.linear_model.PassiveAggressiveClassifier()
X = np.random.random((10, 10))
y = np.random.randint(0, 1, size=(10, 10))
self.assertRaisesRegexp(ValueError, 'bad input shape \(10, 10\)',
cls.fit, X, y) | bsd-3-clause |
timnon/pyschedule | examples/alternating-shifts.py | 1 | 1344 | import sys
sys.path.append('../src')
import getopt
opts, _ = getopt.getopt(sys.argv[1:], 't:', ['test'])
n_night_shifts = 5
n_day_shifts = 5
n_tasks = n_night_shifts+n_day_shifts
horizon = n_tasks
from pyschedule import Scenario, solvers, plotters
S = Scenario('shift_bounds',horizon=horizon)
R = S.Resource('P')
for i in range(n_night_shifts):
	# add a small delay cost so that, without any further
	# constraint, the 5 night shifts would all be scheduled
	# before the 5 day shifts
T = S.Task('N%i'%i,delay_cost=2)
# the shift type of night shifts is -1
T.shift_type = -1
T += R
for i in range(n_day_shifts):
T = S.Task('D%i'%i,delay_cost=1)
	# the shift type of day shifts is +1
T.shift_type = 1
T += R
for i in range(horizon):
	# for every prefix of periods up to i, make sure the number of
	# night shifts and day shifts never differs by more than one.
	# Each capacity constraint bounds the running sum of
	# 'shift_type' over that range
S += R[:i]['shift_type'] <= 1
S += R[:i]['shift_type'] >= -1
if solvers.mip.solve(S,msg=0,kind='CBC'):
if ('--test','') in opts:
assert( set( T.start_value % 2 for T in S.tasks() if T.name.startswith('N') ) == {0} )
assert( set( T.start_value % 2 for T in S.tasks() if T.name.startswith('D') ) == {1} )
print('test passed')
else:
plotters.matplotlib.plot(S)
else:
print('no solution found')
assert(1==0)
| apache-2.0 |
pandegroup/osprey | osprey/plot.py | 2 | 4611 | import warnings
from collections import OrderedDict
import numpy as np
from matplotlib import cm
from matplotlib.colors import rgb2hex
from sklearn.manifold import TSNE
try:
import pandas as pd
import bokeh.plotting as bk
from bokeh.models import HoverTool
from bokeh.models.sources import ColumnDataSource
except ImportError:
raise RuntimeError(
'This command requires the Bokeh library (http://bokeh.pydata.org/) '
'version >=0.10.0.\n\n $ conda install bokeh # (recommended)\n'
'or\n $ pip install bokeh')
TOOLS = "pan,wheel_zoom,box_zoom,reset,hover"
def nonconstant_parameters(data):
assert len(data) > 0
df = pd.DataFrame([d['parameters'] for d in data])
# http://stackoverflow.com/a/20210048/1079728
filtered = df.loc[:, (df != df.ix[0]).any()]
return filtered
def build_scatter_tooltip(x, y, tt, add_line=True, radius=.1, title='My Plot',
xlabel='Iteration number', ylabel='Score'):
p = bk.figure(title=title, tools=TOOLS)
tt['x'] = x
tt['y'] = y
tt['radius'] = radius
p.circle(
x='x', y='y', radius='radius', source=tt,
fill_alpha=0.6, line_color=None)
if add_line:
p.line(x, y, line_width=2)
xax, yax = p.axis
xax.axis_label = xlabel
yax.axis_label = ylabel
cp = p
hover = cp.select(dict(type=HoverTool))
format_tt = [(s, '@%s' % s) for s in tt.columns]
hover.tooltips = OrderedDict([("index", "$index")] + format_tt)
return p
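# Hedged usage sketch (synthetic values; this demo helper is not part of the
# original module).  The caller can render the returned figure with
# bokeh.plotting.show().
def _demo_build_scatter_tooltip():
    tt = pd.DataFrame({'learning_rate': [0.1, 0.01, 0.001],
                       'n_estimators': [10, 50, 100]})
    scores = [0.70, 0.80, 0.75]
    return build_scatter_tooltip(x=list(range(len(scores))), y=scores, tt=tt,
                                 radius=0.05, title='Demo: score per iteration')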
def plot_1(data, *args):
"""Plot 1. All iterations (scatter plot)"""
df_all = pd.DataFrame(data)
df_params = nonconstant_parameters(data)
return build_scatter_tooltip(
x=df_all['id'], y=df_all['mean_test_score'], tt=df_params,
title='All Iterations')
def plot_2(data, *args):
"""Plot 2. Running best score (scatter plot)"""
df_all = pd.DataFrame(data)
df_params = nonconstant_parameters(data)
x = [df_all['id'][0]]
y = [df_all['mean_test_score'][0]]
params = [df_params.loc[0]]
for i in range(len(df_all)):
if df_all['mean_test_score'][i] > y[-1]:
x.append(df_all['id'][i])
y.append(df_all['mean_test_score'][i])
params.append(df_params.loc[i])
return build_scatter_tooltip(
x=x, y=y, tt=pd.DataFrame(params), title='Running best')
def plot_3(data, ss, *args):
"""t-SNE embedding of the parameters, colored by score
"""
if len(data) <= 1:
warnings.warn("Only one datapoint. Could not compute t-SNE embedding.")
return None
scores = np.array([d['mean_test_score'] for d in data])
# maps each parameters to a vector of floats
warped = np.array([ss.point_to_unit(d['parameters']) for d in data])
# Embed into 2 dimensions with t-SNE
X = TSNE(n_components=2).fit_transform(warped)
e_scores = np.exp(scores)
mine, maxe = np.min(e_scores), np.max(e_scores)
color = (e_scores - mine) / (maxe - mine)
mapped_colors = list(map(rgb2hex, cm.get_cmap('RdBu_r')(color)))
p = bk.figure(title='t-SNE (unsupervised)', tools=TOOLS)
df_params = nonconstant_parameters(data)
df_params['score'] = scores
df_params['x'] = X[:, 0]
df_params['y'] = X[:, 1]
df_params['color'] = mapped_colors
df_params['radius'] = 1
p.circle(
x='x', y='y', color='color', radius='radius',
source=ColumnDataSource(data=df_params), fill_alpha=0.6,
line_color=None)
cp = p
hover = cp.select(dict(type=HoverTool))
format_tt = [(s, '@%s' % s) for s in df_params.columns]
hover.tooltips = OrderedDict([("index", "$index")] + format_tt)
xax, yax = p.axis
xax.axis_label = 't-SNE coord 1'
yax.axis_label = 't-SNE coord 2'
return p
def plot_4(data, *args):
"""Scatter plot of score vs each param
"""
params = nonconstant_parameters(data)
scores = np.array([d['mean_test_score'] for d in data])
order = np.argsort(scores)
for key in params.keys():
if params[key].dtype == np.dtype('bool'):
params[key] = params[key].astype(np.int)
p_list = []
for key in params.keys():
x = params[key][order]
y = scores[order]
params = params.loc[order]
try:
radius = (np.max(x) - np.min(x)) / 100.0
except:
print("error making plot4 for '%s'" % key)
continue
p_list.append(build_scatter_tooltip(
x=x, y=y, radius=radius, add_line=False, tt=params,
xlabel=key, title='Score vs %s' % key))
return p_list
| apache-2.0 |
sinhrks/numpy | numpy/lib/function_base.py | 7 | 134693 | from __future__ import division, absolute_import, print_function
import warnings
import sys
import collections
import operator
import numpy as np
import numpy.core.numeric as _nx
from numpy.core import linspace, atleast_1d, atleast_2d
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
integer, isscalar
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
mod, exp, log10
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes, number
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import _insert, add_docstring
from numpy.core.multiarray import digitize, bincount, interp as compiled_interp
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
# Force range to be a generator, for np.delete's usage.
if sys.version_info[0] < 3:
range = xrange
__all__ = [
'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc'
]
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try:
iter(y)
except:
return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None,
density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError(
'`bins` should be a positive integer.')
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi + 0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, np.complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
# Initialize empty histogram
n = np.zeros(bins, ntype)
# Pre-compute histogram scaling factor
norm = bins / (mx - mn)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= mn)
keep &= (tmp_a <= mx)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a = tmp_a.astype(float)
tmp_a -= mn
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on mx we
# need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == bins] -= 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real, minlength=bins)
n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
else:
n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
# We now compute the bin edges since these are returned
bins = linspace(mn, mx, bins + 1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Initialize empty histogram
n = np.zeros(bins.shape, ntype)
if weights is None:
for i in arange(0, len(a), BLOCK):
sa = sort(a[i:i+BLOCK])
n += np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero, ], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'),
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
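# Hedged sketch (not part of numpy): the equal-width fast path above amounts to
# "scale each value into [0, bins) and bincount the resulting integer indices".
# This demo assumes every value of `a` already lies inside [mn, mx].
def _demo_equal_width_binning(a, bins, mn, mx):
    idx = ((np.asarray(a, dtype=float) - mn) * (bins / (mx - mn))).astype(np.intp)
    idx[idx == bins] -= 1  # values exactly equal to mx belong in the last bin
    return np.bincount(idx, minlength=bins)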
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal to the dimension of the '
                'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# avoid rounding issues for comparisons when dealing with inexact types
if np.issubdtype(sample.dtype, np.inexact):
edge_dt = sample.dtype
else:
edge_dt = float
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError(
"Element at index %s in `bins` should be a positive "
"integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1, dtype=edge_dt)
else:
edges[i] = asarray(bins[i], edge_dt)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError(
"Found bin edge of size <= 0. Did you specify `bins` with"
"non-monotonic sequence?")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
not_smaller_than_edge = (sample[:, i] >= edges[i][-1])
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1, -1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : array_type or double
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix):
a = np.asarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major (C-style) or
column-major (Fortran-style) memory representation.
Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have a default value of 0.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.linspace(-2.5, 2.5, 6)
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if (isscalar(condlist) or not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray))):
condlist = [condlist]
condlist = array(condlist, dtype=bool)
n = len(condlist)
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
zerod = False
if x.ndim == 0:
x = x[None]
zerod = True
if condlist.shape[-1] != 1:
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
condlist = np.vstack([condlist, ~totlist])
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
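A rough additional check that positions matched by no condition take the
``default`` value:
>>> np.array_equal(np.select(condlist, choicelist, default=-1),
...                [0, 1, 2, -1, -1, -1, 36, 49, 64, 81])
True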
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError(
'list of cases must be same length as list of conditions')
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
# 2014-02-24, 1.9
warnings.warn("select with an empty condition list is not possible"
"and will be deprecated",
DeprecationWarning)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
'invalid entry in condlist: should be boolean ndarray')
if deprecated_ints:
# 2014-02-24, 1.9
msg = "select condlists containing integer ndarrays is deprecated " \
"and will be removed in the future. Use `.astype(bool)` to " \
"convert to bools."
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior and either first differences or second order accurate
one-sided (forward or backward) differences at the boundaries. The
returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
edge_order : {1, 2}, optional
Gradient is calculated using `edge_order`-th order accurate
differences at the boundaries. Default: 1.
.. versionadded:: 1.9.1
Returns
-------
gradient : list of ndarray
Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
>>> x = np.array([0, 1, 2, 3, 4])
>>> dx = np.gradient(x)
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
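As an additional illustrative check (default unit spacing), the second-order
result recovers the exact derivative ``2*x`` of this quadratic:
>>> np.allclose(np.gradient(y, edge_order=2), 2 * x)
True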
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise TypeError(
"invalid number of arguments")
edge_order = kwargs.pop('edge_order', 1)
if kwargs:
raise TypeError('"{}" are not valid keyword arguments.'.format(
'", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M':
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm':
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
y = f
for axis in range(N):
if y.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (y[1] - y[0])
out[slice1] = (y[slice2] - y[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (y[-1] - y[-2])
out[slice1] = (y[slice2] - y[slice3])
# Numerical differentiation: 2nd order edges, 2nd order interior
else:
# Use second order differences where possible
out = np.empty_like(y, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (y[2:] - y[:-2])/2.0
out[slice1] = (y[slice2] - y[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
# 1D equivalent -- out[0] = -(3*y[0] - 4*y[1] + y[2]) / 2.0
out[slice1] = -(3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
slice4[axis] = -3
# 1D equivalent -- out[-1] = (3*y[-1] - 4*y[-2] + y[-3])
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
out /= dx[axis]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n`-th order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d, cumsum
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
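A quick illustrative check of the recursive definition, reusing the 2-D ``x``
above:
>>> np.array_equal(np.diff(x, n=2), np.diff(np.diff(x)))
True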
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
.. versionadded:: 1.10.0
Returns
-------
y : float or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
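A small illustrative check of the default ``left``/``right`` clamping, using
throwaway data:
>>> xp2, fp2 = [1.0, 2.0, 3.0], [3.0, 2.0, 0.0]
>>> bool(np.interp(0.0, xp2, fp2) == fp2[0])
True
>>> bool(np.interp(4.0, xp2, fp2) == fp2[-1])
True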
"""
if period is None:
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
else:
if period == 0:
raise ValueError("period must be a non-zero value")
period = abs(period)
left = None
right = None
return_array = True
if isinstance(x, (float, int, number)):
return_array = False
x = [x]
x = np.asarray(x, dtype=np.float64)
xp = np.asarray(xp, dtype=np.float64)
fp = np.asarray(fp, dtype=np.float64)
if xp.ndim != 1 or fp.ndim != 1:
raise ValueError("Data points must be 1-D sequences")
if xp.shape[0] != fp.shape[0]:
raise ValueError("fp and xp are not of the same length")
# normalizing periodic boundaries
x = x % period
xp = xp % period
asort_xp = np.argsort(xp)
xp = xp[asort_xp]
fp = fp[asort_xp]
xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
fp = np.concatenate((fp[-1:], fp, fp[0:1]))
if return_array:
return compiled_interp(x, xp, fp, left, right)
else:
return compiled_interp(x, xp, fp, left, right).item()
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd + pi, 2*pi) - pi
_nx.copyto(ddmod, pi, where=(ddmod == -pi) & (dd > 0))
ph_correct = ddmod - dd
_nx.copyto(ph_correct, 0, where=abs(dd) < discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.:
break
else:
first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.:
break
else:
last = last - 1
return filt[first:last]
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True], tmp[1:] != tmp[:-1]))
return tmp[idx]
except AttributeError:
items = sorted(set(x))
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Note that `place` does the exact opposite of `extract`.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress, place
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
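A further illustrative check that a short ``vals`` is recycled for every True
position (here a single value):
>>> arr2 = np.arange(6).reshape(2, 3)
>>> np.place(arr2, arr2 > 2, [99])
>>> np.array_equal(arr2, [[0, 1, 2], [99, 99, 99]])
True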
"""
return _insert(arr, mask, vals)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from io import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
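A brief illustrative check, reusing ``myfunc`` from above, that ``otypes``
pins the output dtype without relying on the inference call:
>>> out = np.vectorize(myfunc, otypes=[float])([1, 2, 3, 4], 2)
>>> out.dtype == np.dtype(float)
True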
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the
first argument will be used to determine the number of outputs. The
results of this call will be cached if `cache` is `True` to prevent
calling the function twice. However, to implement the cache, the
original function must be wrapped which will slow down subsequent
calls, so only do this if your function is expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None,
cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None, fweights=None, aweights=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` corresponds to the
number of observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using the
keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print(np.cov(X))
[[ 11.71       -4.286     ]
 [ -4.286       2.14413333]]
>>> print(np.cov(x, y))
[[ 11.71       -4.286     ]
 [ -4.286       2.14413333]]
>>> print(np.cov(x))
11.71
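An illustrative check of the frequency-weight semantics: giving the first
observation a weight of two matches simply repeating it (values reused from
above):
>>> np.allclose(np.cov(x, y, fweights=[2, 1, 1]),
...             np.cov(x + x[:1], y + y[:1]))
True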
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if rowvar == 0 and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if rowvar == 0 and y.shape[0] != 1:
y = y.T
X = np.vstack((X, y))
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = float(X.shape[1] - ddof)
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof*sum(w*aweights)/w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X*w).T
return (dot(X, X_T.conj())/fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
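Examples
--------
A minimal illustrative sketch with two perfectly anti-correlated variables:
>>> x = np.array([[0., 1., 2.], [2., 1., 0.]])
>>> np.allclose(np.corrcoef(x), [[1., -1.], [-1., 1.]])
True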
"""
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
DeprecationWarning)
c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
return c / sqrt(multiply.outer(d, d))
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/(M-1)) + 0.08 \\cos(4\\pi n/(M-1))
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
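A small illustrative check that the peak value reaches one when the number of
samples is odd:
>>> bool(np.isclose(np.blackman(13).max(), 1.0))
True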
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0, M)
return 0.54 - 0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
polynomial expansions are employed in each interval. Relative error on
the domain [0,30] using IEEE arithmetic is documented [3]_ as having a
peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
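Since the implementation works with the absolute value of its argument, the
function is even; a quick illustrative check:
>>> np.allclose(np.i0(-3.0), np.i0(3.0))
True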
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x < 0)
x[ind] = -x[ind]
ind = (x <= 8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
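As a rough check of the table above, ``beta = 0`` reduces to a rectangular
(all-ones) window:
>>> np.allclose(np.kaiser(12, 0), np.ones(12))
True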
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.linspace(-4, 4, 401)
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
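An illustrative check of the ``sinc(0) == 1`` limit noted above:
>>> float(np.sinc(0.0)) == 1.0
True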
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
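Examples
--------
A brief illustrative check of the equivalence noted above:
>>> a = np.array([[3, 1], [2, 4]])
>>> np.array_equal(np.msort(a), np.sort(a, axis=0))
True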
"""
b = array(a, subok=True, copy=True)
b.sort(0)
return b
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
func : callable
        Reduction function capable of receiving an axis argument.
        It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
a = np.asanyarray(a)
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
try:
axis = operator.index(axis)
if axis >= nd or axis < -nd:
raise IndexError("axis %d out of bounds (%d)" % (axis, a.ndim))
keepdim[axis] = 1
except TypeError:
sax = set()
for x in axis:
if x >= nd or x < -nd:
raise IndexError("axis %d out of bounds (%d)" % (x, nd))
if x in sax:
raise ValueError("duplicate value in axis")
sax.add(x % nd)
keepdim[x] = 1
keep = sax.symmetric_difference(frozenset(range(nd)))
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
else:
keepdim = [1] * a.ndim
r = func(a, **kwargs)
return r, keepdim
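# --- Editor's illustrative sketch; ``_demo_ureduce_extended_axis`` is a
# hypothetical helper, not part of the original module.  With a tuple axis,
# ``_ureduce`` collapses the reduced axes into one and returns the shape
# needed to emulate ``keepdims=True``.
def _demo_ureduce_extended_axis():
    a = np.arange(24).reshape(2, 3, 4)
    r, keepdim = _ureduce(a, func=np.sum, axis=(0, 2))
    assert keepdim == [1, 3, 1]
    assert np.array_equal(r, a.sum(axis=2).sum(axis=0))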
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and buffer length as the expected output, but the
type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve the
contents of the input array. Treat the input as undefined, but it
will probably be fully or partially sorted. Default is False. Note
that, if `overwrite_input` is True and the input is not already an
ndarray, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in which
case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
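# --- Editor's illustrative sketch; ``_demo_median_keepdims`` is a
# hypothetical helper, not part of the original module.  It shows that
# ``keepdims`` only changes the shape of the result.
def _demo_median_keepdims():
    a = np.array([[10, 7, 4], [3, 2, 1]])
    assert median(a, axis=0, keepdims=True).shape == (1, 3)
    assert median(a, axis=0, keepdims=False).shape == (3,)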
def _median(a, axis=None, out=None, overwrite_input=False):
    # can't reasonably be implemented in terms of percentile as we have to
# call mean to not break astropy
a = np.asanyarray(a)
# Set the partition indexes
if axis is None:
sz = a.size
else:
sz = a.shape[axis]
if sz % 2 == 0:
szh = sz // 2
kth = [szh - 1, szh]
else:
kth = [(sz - 1) // 2]
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
kth.append(-1)
if overwrite_input:
if axis is None:
part = a.ravel()
part.partition(kth)
else:
a.partition(kth, axis=axis)
part = a
else:
part = partition(a, kth, axis=axis)
if part.shape == ():
# make 0-D arrays work
return part.item()
if axis is None:
axis = 0
indexer = [slice(None)] * part.ndim
index = part.shape[axis] // 2
if part.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
# warn and return nans like mean would
rout = mean(part[indexer], axis=axis, out=out)
part = np.rollaxis(part, axis, part.ndim)
n = np.isnan(part[..., -1])
if rout.ndim == 0:
if n == True:
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if out is not None:
out[...] = a.dtype.type(np.nan)
rout = out
else:
rout = a.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
warnings.warn("Invalid value encountered in median for" +
" %d results" % np.count_nonzero(n.ravel()),
RuntimeWarning)
rout[n] = np.nan
return rout
else:
# if there are no nans
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(part[indexer], axis=axis, out=out)
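# --- Editor's illustrative sketch; ``_demo_median_even_length`` is a
# hypothetical helper, not part of the original module.  For an even number
# of elements the two middle order statistics found by ``partition`` are
# averaged, matching the definition in the docstring of ``median``.
def _demo_median_even_length():
    v = np.array([1.0, 9.0, 3.0, 7.0])
    assert _median(v) == (3.0 + 7.0) / 2.0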
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int or sequence of int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the percentiles along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
percentile. This will save memory when you do not need to preserve
the contents of the input array. In this case you should not make
any assumptions about the content of the passed in array `a` after
this function completes -- treat it as undefined. Default is False.
Note that, if the `a` input is not already an array this parameter
will have no effect, `a` will be converted to an array internally
regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
.. versionadded:: 1.9.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
If a single percentile `q` is given and axis=None a scalar is
returned. If multiple percentiles `q` are given an array holding
the result is returned. The results are listed in the first axis.
(If `out` is specified, in which case that array is returned
instead). If the input contains integers, or floats of smaller
precision than 64, then the output data-type is float64. Otherwise,
the output data-type is the same as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the q-th percentile of V is the q-th ranked
value in a sorted copy of V. The values and distances of the two
nearest neighbors as well as the `interpolation` parameter will
determine the percentile if the normalized ranking does not match q
exactly. This function is the same as the median if ``q=50``, the same
as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([ 7., 2.])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=m)
    array([ 6.5, 4.5, 2.5])
    >>> m
    array([ 6.5, 4.5, 2.5])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7., 2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=None, overwrite_input=True)
    3.5
"""
q = array(q, dtype=np.float64, copy=True)
r, k = _ureduce(a, func=_percentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
if q.ndim == 0:
return r.reshape(k)
else:
return r.reshape([len(q)] + k)
else:
return r
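# --- Editor's illustrative sketch; ``_demo_percentile_interpolation`` is a
# hypothetical helper, not part of the original module.  The interpolation
# options only differ when the target index is fractional; with two points
# and q=25 the index is 0.25.
def _demo_percentile_interpolation():
    v = np.array([0.0, 10.0])
    assert percentile(v, 25, interpolation='linear') == 2.5
    assert percentile(v, 25, interpolation='lower') == 0.0
    assert percentile(v, 25, interpolation='higher') == 10.0
    assert percentile(v, 25, interpolation='midpoint') == 5.0
    assert percentile(v, 25, interpolation='nearest') == 0.0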
def _percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
a = asarray(a)
if q.ndim == 0:
# Do not allow 0-d arrays because following code fails for scalar
zerod = True
q = q[None]
else:
zerod = False
# avoid expensive reductions, relevant for arrays with < O(1000) elements
if q.size < 10:
for i in range(q.size):
if q[i] < 0. or q[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
q[i] /= 100.
else:
# faster than any()
if np.count_nonzero(q < 0.) or np.count_nonzero(q > 100.):
raise ValueError("Percentiles must be in the range [0,100]")
q /= 100.
    # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
else:
ap = a
else:
if axis is None:
ap = a.flatten()
else:
ap = a.copy()
if axis is None:
axis = 0
Nx = ap.shape[axis]
indices = q * (Nx - 1)
# round fractional indices according to interpolation method
if interpolation == 'lower':
indices = floor(indices).astype(intp)
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
indices = floor(indices) + 0.5
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
pass # keep index as fraction and interpolate
else:
        raise ValueError(
            "interpolation can only be 'linear', 'lower', 'higher', "
            "'midpoint', or 'nearest'")
n = np.array(False, dtype=bool) # check for nan's flag
if indices.dtype == intp: # take the points along axis
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices = indices[:-1]
n = np.isnan(ap[-1:, ...])
if zerod:
indices = indices[0]
r = take(ap, indices, axis=axis, out=out)
else: # weight the points above and below the indices
indices_below = floor(indices).astype(intp)
indices_above = indices_below + 1
indices_above[indices_above > Nx - 1] = Nx - 1
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
weights_below = 1.0 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
weights_below.shape = weights_shape
weights_above.shape = weights_shape
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
ap = np.rollaxis(ap, axis, 0)
weights_below = np.rollaxis(weights_below, axis, 0)
weights_above = np.rollaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
if np.issubdtype(a.dtype, np.inexact):
indices_above = indices_above[:-1]
n = np.isnan(ap[-1:, ...])
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
x1 = np.rollaxis(x1, axis, 0)
x2 = np.rollaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
x2 = x2.squeeze(0)
if out is not None:
r = add(x1, x2, out=out)
else:
r = add(x1, x2)
if np.any(n):
warnings.warn("Invalid value encountered in median",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
if out is not None:
out[...] = a.dtype.type(np.nan)
r = out
else:
r = a.dtype.type(np.nan)
else:
r[..., n.squeeze(0)] = a.dtype.type(np.nan)
else:
if r.ndim == 1:
r[:] = a.dtype.type(np.nan)
else:
r[..., n.repeat(q.size, 0)] = a.dtype.type(np.nan)
return r
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
    are taken from the `y` array; by default the x-axis distances between
    points are 1.0, but they can alternatively be provided with the `x` array
    or with the `dx` scalar. The return value equals the combined area under
    the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
try:
ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError:
# Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
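# --- Editor's illustrative sketch; ``_demo_trapz_dx_equivalence`` is a
# hypothetical helper, not part of the original module.  Supplying ``x`` or
# an equivalent constant ``dx`` gives the same integral.
def _demo_trapz_dx_equivalence():
    y = np.array([1.0, 2.0, 3.0])
    assert trapz(y, dx=2.0) == 8.0
    assert trapz(y, x=np.array([0.0, 2.0, 4.0])) == 8.0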
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
add_docstring(new, doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new, doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
except:
pass
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
        If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing
keyword argument. Giving the string 'ij' returns a meshgrid with
matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
In the 2-D case with inputs of length M and N, the outputs are of shape
(N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
with inputs of length M, N and P, outputs are of shape (N, M, P) for
'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
illustrated by the following code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
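# --- Editor's illustrative sketch; ``_demo_meshgrid_indexing`` is a
# hypothetical helper, not part of the original module.  The two indexing
# conventions simply transpose the first two output dimensions.
def _demo_meshgrid_indexing():
    x, y = np.arange(3), np.arange(2)          # M = 3, N = 2
    xv_xy, _ = meshgrid(x, y, indexing='xy')
    xv_ij, _ = meshgrid(x, y, indexing='ij')
    assert xv_xy.shape == (2, 3)               # (N, M) for 'xy'
    assert xv_ij.shape == (3, 2)               # (M, N) for 'ij'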
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Notes
-----
Often it is preferable to use a boolean mask. For example:
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further
use of `mask`.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
if ndim == 0:
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from delete and raise an error", DeprecationWarning)
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
start, stop, step = obj.indices(N)
xr = range(start, stop, step)
numtodel = len(xr)
if numtodel <= 0:
if wrap:
return wrap(arr.copy())
else:
return arr.copy()
# Invert if step is negative:
if step < 0:
step = -step
start = xr[-1]
stop = xr[0] + 1
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
keep = ones(stop-start, dtype=bool)
keep[:stop-start:step] = False
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(start, stop)
arr = arr[slobj2]
slobj2[axis] = keep
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
else:
return new
_obj = obj
obj = np.asarray(obj)
# After removing the special handling of booleans and out of
# bounds values, the conversion to the array can be removed.
if obj.dtype == bool:
warnings.warn(
"in the future insert will treat boolean arrays and array-likes "
"as boolean index instead of casting it to integer", FutureWarning)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
obj = obj.item()
if (obj < -N or obj >= N):
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (obj < 0):
obj += N
newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj, None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1, None)
new[slobj] = arr[slobj2]
else:
if obj.size == 0 and not isinstance(_obj, np.ndarray):
obj = obj.astype(intp)
if not np.can_cast(obj, intp, 'same_kind'):
# obj.size = 1 special case always failed and would just
# give superfluous warnings.
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
"error in the future", DeprecationWarning)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
# Test if there are out of bound indices, this is deprecated
inside_bounds = (obj < N) & (obj >= -N)
if not inside_bounds.all():
# 2013-09-24, 1.9
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
DeprecationWarning)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
"`numpy.delete`.", FutureWarning)
obj = obj[positive_indices]
keep[obj, ] = False
slobj[axis] = keep
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
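# --- Editor's illustrative sketch; ``_demo_delete_vs_boolean_mask`` is a
# hypothetical helper, not part of the original module.  It checks the
# boolean-mask equivalence described in the Notes of ``delete``.
def _demo_delete_vs_boolean_mask():
    arr = np.arange(5)
    mask = np.ones(len(arr), dtype=bool)
    mask[[0, 2, 4]] = False
    assert np.array_equal(delete(arr, [0, 2, 4], axis=0), arr[mask])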
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
.. versionadded:: 1.8.0
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (similar to calling insert multiple
times).
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
`values` should be shaped so that ``arr[...,obj,...] = values``
is legal.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
concatenate : Join a sequence of arrays along an existing axis.
delete : Delete elements from an array.
Notes
-----
    Note that for higher dimensional inserts `obj=0` behaves very differently
    from `obj=[0]`, just as `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
Difference between sequence and scalars:
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
>>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),
... np.insert(a, [1], [[1],[2],[3]], axis=1))
True
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim - 1
else:
if ndim > 0 and (axis < -ndim or axis >= ndim):
raise IndexError(
"axis %i is out of bounds for an array of "
"dimension %i" % (axis, ndim))
if (axis < 0):
axis += ndim
if (ndim == 0):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
"from insert and raise an error", DeprecationWarning)
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, slice):
# turn it into a range object
indices = arange(*obj.indices(N), **{'dtype': intp})
else:
# need to copy obj, because indices will be changed in-place
indices = np.array(obj)
if indices.dtype == bool:
# See also delete
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
"integer", FutureWarning)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
# raise ValueError('boolean array argument obj to insert '
# 'must be one dimensional')
#indices = np.flatnonzero(obj)
elif indices.ndim > 1:
raise ValueError(
"index array argument obj to insert must be one dimensional "
"or scalar")
if indices.size == 1:
index = indices.item()
if index < -N or index > N:
raise IndexError(
"index %i is out of bounds for axis %i with "
"size %i" % (obj, axis, N))
if (index < 0):
index += N
# There are some object array corner cases here, but we cannot avoid
# that:
values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)
if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very differently from a[:,[0],:] = ...! This changes values so
            # that it works like the second case (here a[:,0:1,:]).
values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, index)
new[slobj] = arr[slobj]
slobj[axis] = slice(index, index+numnew)
new[slobj] = values
slobj[axis] = slice(index+numnew, None)
slobj2 = [slice(None)] * ndim
slobj2[axis] = slice(index, None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif indices.size == 0 and not isinstance(obj, np.ndarray):
# Can safely cast the empty list to intp
indices = indices.astype(intp)
if not np.can_cast(indices, intp, 'same_kind'):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
"error in the future", DeprecationWarning)
indices = indices.astype(intp)
indices[indices < 0] += N
numnew = len(indices)
order = indices.argsort(kind='mergesort') # stable sort
indices[order] += np.arange(numnew)
newshape[axis] += numnew
old_mask = ones(newshape[axis], dtype=bool)
old_mask[indices] = False
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = indices
slobj2[axis] = old_mask
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
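# --- Editor's illustrative sketch; ``_demo_insert_scalar_vs_sequence`` is a
# hypothetical helper, not part of the original module.  A scalar ``obj``
# broadcasts ``values`` along the insertion axis, while a one-element
# sequence keeps the orientation of ``values`` itself; for a full column
# the two forms agree.
def _demo_insert_scalar_vs_sequence():
    a = np.array([[1, 1], [2, 2], [3, 3]])
    col_a = insert(a, 1, [9, 9, 9], axis=1)
    col_b = insert(a, [1], [[9], [9], [9]], axis=1)
    assert np.array_equal(col_a, col_b)
    assert np.array_equal(col_a, [[1, 9, 1], [2, 9, 2], [3, 9, 3]])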
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
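# --- Editor's illustrative sketch; ``_demo_append_axis_behaviour`` is a
# hypothetical helper, not part of the original module.  Without ``axis``
# both inputs are flattened; with ``axis`` the shapes must be compatible.
def _demo_append_axis_behaviour():
    a = np.array([[1, 2], [3, 4]])
    assert append(a, [[5, 6]]).shape == (6,)
    assert append(a, [[5, 6]], axis=0).shape == (3, 2)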
| bsd-3-clause |
rsivapr/scikit-learn | examples/plot_permutation_test_for_classification.py | 8 | 2208 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a common
technique consists in repeating the classification procedure after
randomizing (permuting) the labels. The p-value is then given by the
percentage of runs for which the score obtained is greater than the
classification score obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(y, 2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
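# Editor's note (illustrative, not from the original example): the p-value
# above is computed by ``permutation_test_score`` roughly as
# (C + 1) / (n_permutations + 1), where C is the number of permutation
# scores >= the original score, so with 100 permutations the smallest
# attainable p-value is about 0.0099.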
###############################################################################
# View histogram of permutation scores
pl.hist(permutation_scores, 20, label='Permutation scores')
ylim = pl.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#pl.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#pl.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
pl.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
pl.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
pl.ylim(ylim)
pl.legend()
pl.xlabel('Score')
pl.show()
| bsd-3-clause |
bd-j/pyxydust | pyxydust/pyxydust.py | 1 | 11856 | import os, time
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as pl
from sedpy import observate
import dustmodel
import statutils
import datacube
class Pyxydust(object):
doresid = False
def __init__(self, rp):
"""Initialize.
:param rp:
Dictionary containing a number of important parameters.
"""
self.rp = rp
self.load_models()
self.set_default_params()
def load_models(self):
"""Load the Draine & Li basis models, initialize the grid to
hold resampled models, and load the filters
"""
# Draine and Li Basis
self.dl07 = dustmodel.DraineLi()
# object to hold the model grid
self.dustgrid = dustmodel.SpecLibrary()
# filter objects
self.filterlist = observate.load_filters(self.rp['fnamelist'])
def load_data(self):
"""Read the image cube and the uncertainty cube, apply
distance modulus, and determine 'good' pixels
"""
dat = datacube.load_image_cube(**self.rp)
self.data_mag, self.data_magerr, self.rp['data_header'] = dat
dm = 5.0 * np.log10(self.rp['dist']) + 25
self.data_mag = np.where(self.data_mag != 0.0, self.data_mag - dm, 0.0)
self.nx, self.ny = self.data_mag.shape[0], self.data_mag.shape[1]
gg = np.where((self.data_mag < 0) & np.isfinite(self.data_mag), 1, 0)
# restrict to detections in all bands
self.goodpix = np.where(gg.sum(axis=2) == len(self.rp['imnamelist']))
def setup_output(self):
"""Create arrays to store fit output for each pixel.
"""
self.max_lnprob = np.zeros([self.nx, self.ny]) + float('NaN')
try:
self.outparnames = self.rp['outparnames'] + ['LDUST', 'MDUST']
except (KeyError):
self.outparnames = ['LDUST', 'MDUST']
self.parval = {}
for parn in self.outparnames:
shape = [self.nx, self.ny, len(self.rp['percentiles']) + 1]
self.parval[parn] = np.zeros(shape) + float('NaN')
# self.doresid = self.rp.get('return_residuals', False)
try:
self.doresid = self.rp['return_residuals']
except (KeyError):
self.doresid = False
if self.doresid is True:
self.delta_best = np.zeros([self.nx, self.ny,
len(self.filterlist)]) + float('NaN')
self.dobestfitspectrum = self.rp.get('return_best_spectrum', False)
if self.dobestfitspectrum:
self.best_spectrum = np.zeros([self.nx, self.ny,
len(self.dl07.wavelength)]) + float('NaN')
def fit_image(self):
"""Fit every pixel in an image.
"""
if hasattr(self, 'max_lnprob') is False:
self.setup_output()
start = time.time()
for ipix in xrange(self.goodpix[0].shape[0]):
iy, ix = self.goodpix[0][ipix], self.goodpix[1][ipix]
self.fit_pixel(ix, iy)
duration = time.time() - start
print('Done all pixels in {0:.1f} seconds'.format(duration))
def write_output(self):
"""Write stored fit information to FITS files.
"""
header = self.rp['data_header']
outfile = '{outname}_CHIBEST.fits'.format(**self.rp)
pyfits.writeto(outfile, -2 * self.max_lnprob, header=header,
clobber=True)
for i, parn in enumerate(self.outparnames):
# header.set('BUNIT',unit[i])
outfile = '{0}_{1}_bestfit.fits'.format(self.rp['outname'], parn)
pyfits.writeto(outfile, self.parval[parn][:,:,-1],
header=header, clobber=True)
for j, percent in enumerate(self.rp['percentiles']):
outfile = '{0}_{1}_p{2:5.3f}.fits'.format(self.rp['outname'],
parn, percent)
pyfits.writeto(outfile, self.parval[parn][:,:,j],
header=header, clobber=True)
if self.doresid:
for i, fname in enumerate(self.rp['fnamelist']):
outfile = '{0}_{1}_{2}.fits'.format(self.rp['outname'],
fname, 'bestfit_residual')
pyfits.writeto(outfile, self.delta_best[:,:,i],
header=header, clobber=True)
if self.dobestfitspectrum:
outfile = '{0}_bestfit_spectrum.fits'.format(self.rp['outname'])
pyfits.writeto(outfile, self.best_spectrum, clobber=True)
class PyxydustGrid(Pyxydust):
def initialize_grid(self, params=None):
"""Draw grid or library parameters from prior distributions
and build the grid.
"""
if params is not None:
self.params = params
parnames = self.params.keys()
theta = np.zeros([self.rp['ngrid'], len(parnames)])
for j, parn in enumerate(parnames):
pmin, pmax = self.params[parn]['min'], self.params[parn]['max']
n = self.rp['ngrid']
theta[:,j] = np.random.uniform(pmin, pmax, n)
if self.params[parn]['type'] == 'log':
theta[:,j] = 10**theta[:,j] # deal with uniform log priors
start = time.time()
self.dustgrid.set_pars(theta, parnames)
dat = self.dl07.generateSEDs(self.dustgrid.pars,self.filterlist,
wave_min=self.rp['wave_min'],
wave_max=self.rp['wave_max'])
self.dustgrid.sed, self.dustgrid.lbol, tmp = dat
umin = self.dustgrid.pars['UMIN']
umax = self.dustgrid.pars['UMAX']
gamma = self.dustgrid.pars['GAMMA']
ubar = self.dl07.ubar(umin, umax, gamma)
self.dustgrid.add_par(ubar,'UBAR')
duration = time.time() - start
print('Model Grid built in {0:.1f} seconds'.format(duration))
def fit_pixel(self, ix, iy, store=True, show_cdf=False):
"""
Determine \chi^2 of every model for a given pixel, and store
moments of the CDF for each parameter as well as the
bestfitting model parameters. Optionally store magnitude
residuals from the best fit.
"""
obs, err = self.data_mag[iy,ix,:], self.data_magerr[iy,ix,:]
mask = np.where((obs < 0) & np.isfinite(obs), 1, 0)
dat = statutils.lnprob_grid(self.dustgrid, obs, err, mask)
lnprob, ltir, dustm, delta_mag = dat
ind_isnum = np.where(np.isfinite(lnprob))[0]
lnprob_isnum = lnprob[ind_isnum]
ind_max = np.argmax(lnprob_isnum)
# this should all go to a storage method
self.max_lnprob[iy,ix] = np.max(lnprob_isnum)
self.delta_best[iy,ix,:] = delta_mag[ind_isnum[ind_max],:]
if self.dobestfitspectrum:
spec = self.dl07.spectra_from_pars(self.dustgrid.pars[ind_isnum[ind_max]])
self.best_spectrum[iy,ix,:] = (dustm[ind_isnum[ind_max]] * spec)
for i, parn in enumerate(self.outparnames):
if parn == 'LDUST':
par = np.squeeze(ltir)[ind_isnum] * self.dl07.convert_to_lsun
elif parn == 'MDUST':
par = np.squeeze(dustm)[ind_isnum]
else:
par = np.squeeze(self.dustgrid.pars[parn])[ind_isnum]
order = np.argsort(par)
cdf = (np.cumsum(np.exp(lnprob_isnum[order])) /
np.sum(np.exp(lnprob_isnum)))
ind_ptiles = np.searchsorted(cdf, self.rp['percentiles'])
# should linear interpolate instead of average.
self.parval[parn][iy,ix,:-1] = (par[order[ind_ptiles-1]] +
par[order[ind_ptiles]]) / 2.0
self.parval[parn][iy,ix,-1] = par[ind_max]
def set_default_params(self):
"""Set the default model parameter properties.
"""
# should be list of dicts or dict of lists? no, dict of dicts!
qpahmax = self.dl07.par_range(['QPAH'],
inds=[self.dl07.delta_inds])[0][1]
self.params = {}
self.params['UMIN'] = {'min':np.log10(0.1), 'max':np.log10(25),
'type':'log'}
self.params['UMAX'] = {'min':3, 'max':6, 'type':'log'}
self.params['GAMMA'] = {'min':0, 'max':1.0, 'type':'linear'}
self.params['QPAH'] = {'min':0.47, 'max':qpahmax, 'type':'log'}
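# --- Editor's illustrative sketch (not part of the original package) ---
# A minimal driver for the grid-based fitter, assuming ``rp`` holds the keys
# used above ('fnamelist', 'imnamelist', 'dist', 'outname', 'percentiles',
# 'ngrid', 'wave_min', 'wave_max', ...):
#
#     pg = PyxydustGrid(rp)
#     pg.initialize_grid()   # draw parameters and build the model SED grid
#     pg.load_data()         # read images, apply the distance modulus
#     pg.fit_image()         # fit every 'good' pixel
#     pg.write_output()      # write best-fit and percentile maps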
class PyxydustMCMC(Pyxydust):
"""Use emcee to do MCMC sampling of the parameter space for a
given pixel.
Wildly unfinished/untested
"""
def set_default_params(self, large_number=1e15):
"""Set the default model parameter ranges.
"""
# should be list of dicts or dict of lists? no, dict of dicts!
qpahmax = self.dl07.par_range(['QPAH'],
inds=[self.dl07.delta_inds])[0][1]
self.params = {}
self.params['UMIN'] = {'min': np.log10(0.1), 'max':np.log10(25),
'type':'log'}
self.params['UMAX'] = {'min': 3, 'max':6, 'type':'log'}
self.params['GAMMA'] = {'min': 0, 'max':1.0, 'type':'linear'}
self.params['QPAH'] = {'min': 0.47, 'max':qpahmax, 'type':'log'}
self.params['MDUST'] = {'min':0, 'max':large_number, 'type': 'linear'}
def fit_pixel(self, ix, iy):
obs, err = self.data_mag[ix,iy,:], self.data_magerr[ix,iy,:]
obs_maggies = 10**(0 - obs / 2.5)
obs_ivar = (obs_maggies * err / 1.086)**(-2)
mask = np.where((obs < 0) & np.isfinite(obs), 1, 0)
sampler = self.sample(obs_maggies, obs_ivar, mask)
def sample(self,obs_maggies, obs_ivar, mask):
initial = self.initial_proposal()
# get a sampler, burn it in, and reset
sampler = emcee.EnsembleSampler(self.rp['nwalkers'],
self.rp['ndim'], self.lnprob,
threads=nthreads,
args=[obs_maggies,obs_ivar,mask])
pos, prob, state, blob = sampler.run_mcmc(initial, self.rp['nburn'])
sampler.reset()
# cry havoc
sampler.run_mcmc(np.array(pos),self.rp['nsteps'], rstate0=state)
return sampler
def initial_proposal(self):
parnames = self.lnprob.lnprob_parnames
theta = np.zeros(len(parnames))
for j, parn in enumerate(parnames):
theta[:,j] = np.random.uniform(self.params[parn]['min'],
self.params[parn]['max'])
return theta
# def model(self, umin=umin, umax=umax, gamma=gamma, mdust=mdust, alpha=2):
# pass
def lnprob(self, theta, obs_maggies, obs_ivar, mask):
lnprob_parnames = ['UMIN', 'UMAX', 'GAMMA', 'QPAH', 'MDUST']
# ugh. need quick dict or struct_array from list/array
# pardict = {lnprob_parnames theta}
# prior bounds check
ptest = []
for i,par in enumerate(lnprob_parnames):
ptest.append(pardict[par] >= self.params[par]['min'])
ptest.append(pardict[par] <= self.params[par]['max'])
if self.params[par]['type'] == 'log':
pardict[par] = 10**pardict[par]
if False in ptest:
# set lnp to -infty if parameters out of prior bounds
lnprob = -np.infty
lbol = -1
else:
# model sed (in AB absolute mag) for these parameters
sed, lbol = model(**pardict)
sed_maggies = 10**(0 - sed / 2.5)
# probability
chi2 = ((sed_maggies - obs_maggies)**2) * obs_ivar
inds = np.where(mask > 0)
lnprob = -0.5 * chi2[inds].sum()
return lnprob, [lbol]
| gpl-2.0 |
anirudhjayaraman/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | Keras_tensorflow/source/tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 18 | 6444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
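# --- Editor's illustrative sketch; ``_demo_extract_pandas_data`` is a
# hypothetical helper, not part of the original module, and is only
# meaningful when pandas is importable.
def _demo_extract_pandas_data():
  if not HAS_PANDAS:
    return
  df = pd.DataFrame({'a': [1, 2], 'b': [True, False]})
  out = extract_pandas_data(df)
  # numeric and boolean columns are accepted and cast to float
  assert out.shape == (2, 2) and out.dtype == np.float64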
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: int, number of threads used for reading and enqueueing.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
"""
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions.enqueue_data(
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
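# --- Editor's illustrative sketch (not part of the original module) ---
# Typical usage with a tf.contrib.learn estimator; ``df`` and ``labels`` are
# hypothetical names and pandas must be installed:
#
#   df = pd.DataFrame({'x': [1., 2., 3., 4.]})
#   labels = pd.Series([0, 1, 0, 1])
#   train_input_fn = pandas_input_fn(df, labels, batch_size=2,
#                                    shuffle=True, num_epochs=None)
#   # an estimator would then call: estimator.fit(input_fn=train_input_fn)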
| mit |
kaichogami/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
treycausey/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
henrykironde/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
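# --- Editor's illustrative sketch; ``_demo_resample_model_scores`` and
# ``always_first_feature`` are hypothetical names, not part of the module.
# The returned scores are simply the fraction of resamplings in which each
# feature was selected.
def _demo_resample_model_scores():
    rng = np.random.RandomState(0)
    X, y = rng.randn(20, 3), rng.randn(20)
    def always_first_feature(X, y, weights, mask, verbose=0):
        # pretend the resampled fit always selects only feature 0
        return np.array([True, False, False])
    scores = _resample_model(always_first_feature, X, y, scaling=0.5,
                             n_resampling=8, random_state=0)
    assert np.allclose(scores, [1.0, 0.0, 0.0])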
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
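# Illustrative usage sketch (added note, not part of the original module); it only uses
# the public methods defined in this file, and assumes `X`, `y` form a regression
# dataset of shape (n_samples, n_features) / (n_samples,):
#
#   from sklearn.linear_model import RandomizedLasso
#   rlasso = RandomizedLasso(alpha='aic', n_resampling=200, selection_threshold=0.25)
#   rlasso.fit(X, y)                  # resampled, randomly re-weighted Lasso fits
#   mask = rlasso.get_support()       # boolean mask of the stable features
#   X_reduced = rlasso.transform(X)   # keep only the selected columns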
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
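# Illustrative usage sketch (added note, not part of the original module) for the
# classification variant; `X`, `y` are assumed to be a labelled dataset:
#
#   from sklearn.linear_model import RandomizedLogisticRegression
#   rlog = RandomizedLogisticRegression(C=1.0, n_resampling=200)
#   rlog.fit(X, y)
#   selected = rlog.get_support(indices=True)   # indices of the stable features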
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
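# Illustrative usage sketch (added note, not part of the original module): compute and
# plot a stability path; `X`, `y` are assumed to be 2D/1D numpy arrays:
#
#   import matplotlib.pyplot as plt
#   alpha_grid, scores_path = lasso_stability_path(X, y, n_resampling=200,
#                                                  sample_fraction=0.75)
#   plt.plot(alpha_grid[1:], scores_path.T[1:])   # one selection-frequency curve per feature
#   plt.xlabel('alpha / alpha_max')
#   plt.ylabel('proportion of resamplings selecting the feature')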
| bsd-3-clause |
gpfreitas/bokeh | examples/interactions/interactive_bubble/gapminder.py | 8 | 4161 | import pandas as pd
from jinja2 import Template
from bokeh.browserlib import view
from bokeh.models import (
ColumnDataSource, Plot, Circle, Range1d,
LinearAxis, HoverTool, Text,
SingleIntervalTicker, CustomJS, Slider
)
from bokeh.palettes import Spectral6
from bokeh.plotting import vplot
from bokeh.resources import JSResources
from bokeh.embed import file_html
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions = process_data()
sources = {}
region_color = regions_df['region_color']
region_color.name = 'region_color'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
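# Note (added for clarity, not part of the original example): if `years` were, e.g.,
# 1964..1966, the string built above would look like
# "{1964: _1964, 1965: _1965, 1966: _1966}", i.e. a JavaScript object literal whose
# values are the (unquoted) names of the per-year ColumnDataSource variables used in
# the CustomJS callback below.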
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(SingleIntervalTicker(interval=1), axis_label="Children per woman (total fertility)", **AXIS_FORMATS)
yaxis = LinearAxis(SingleIntervalTicker(interval=20), axis_label="Life expectancy at birth (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# ### Add the background year text
# We add this first so it is below all the other glyphs
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color='region_color', fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
# Add the legend
text_x = 7
text_y = 95
for i, region in enumerate(regions):
plot.add_glyph(Text(x=text_x, y=text_y, text=[region], text_font_size='10pt', text_color='#666666'))
plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], size=10, line_color=None, fill_alpha=0.8))
text_y = text_y - 5
# Add the slider
code = """
var year = slider.get('value'),
sources = %s,
new_source_data = sources[year].get('data');
renderer_source.set('data', new_source_data);
text_source.set('data', {'year': [String(year)]});
""" % js_source_array
callback = CustomJS(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback, name='testy')
callback.args["renderer_source"] = renderer_source
callback.args["slider"] = slider
callback.args["text_source"] = text_source
# Stick the plot and the slider together
layout = vplot(plot, slider)
# Open our custom template
with open('gapminder_template.jinja', 'r') as f:
template = Template(f.read())
# Use inline resources, render the html and open
js_resources = JSResources(mode='inline')
title = "Bokeh - Gapminder Bubble Plot"
html = file_html(layout, None, title, template=template, js_resources=js_resources)
output_file = 'gapminder.html'
with open(output_file, 'w') as f:
f.write(html)
view(output_file)
| bsd-3-clause |
CopyChat/Plotting | Python/SSR_changes_SWIO/rsds_SWIO_OBS.py | 1 | 6224 | #!/usr/bin/env python
########################################
#Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import netCDF4
import pylab as pl
import numpy as np
import matplotlib as mpl
import datetime
from netCDF4 import num2date
import Scientific.IO.NetCDF as IO
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.dates as mdates
from matplotlib.dates import YearLocator,MonthLocator,DateFormatter,drange
pl.close('all')
########################## OBS location:
# all the plot file shoud be here:
OBSDIR='/Users/ctang/Code/Plotting/Python/SSR_changes_SWIO'
ProjOBSnc=['NCEP-NCAR','ERA_40','ERA_Interim',\
'CM_SAF-CDR','CM_SAF-SARAH-E','CERES']
ProjOBStxt=['MeteoFrance_39site_RUN',\
'MeteoFrance_GILLOT',\
'HelioClim-1']
OBSncfile=[\
'dswrf.sfc.gauss.yearmean.fldmean.1948-2015.swio.nc',\
'ERA_40.ssrd.ssr.ssrc.year.mean.1958-2001.fldmean.swio.nc',\
'ERA.ssrd.ssr.ssrc.yearmean.1979-2014.fldmean.swio.nc',\
'SISmm.CDR.yearmean.fldmean.198301-200512.swio.nc',\
'SISmm.SARAH-E.yearmean.fldmean.199901-201512.swio.nc',\
'rsds_CERES-EBAF_L3B_Ed2-8_2001-2013.swio.fldmean.yearmean.nc']
OBStxtfile=['yearly.meteofrance.missing.removed',\
'Meteofrance.Gillot.1985-2015.year.mean.txt',\
'rsds.HelioClim-1.1985-2005.txt']
VARIABLE=['dswrf','ssrd','ssrd','SIS','SIS','rsds']
COLORtar=['black','dodgerblue','darkgreen','pink',\
'purple','blue','darkmagenta','red','teal',\
'blue','purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
linestyles=['-', '-', '-', '-', '--',\
'-','-','-', '--',\
'-', '-', '--', ':']
#================================================ CMIP5 models
print "==============================================="
#=================================================== define the Plot:
fig,ax = plt.subplots(figsize=(16,9))
plt.xlabel('Year',fontsize=16)
plt.ylabel('Surface Downwelling Solar Radiation ( W/m2 )',fontsize=16)
plt.title("Surface Downwelling Solar Radiation over the SWIO",fontsize=18)
# vertical range ylim yrange
plt.ylim(190,255)
plt.yticks(np.arange(190,255, 10))
plt.xlim(datetime.datetime(1948,01,01),datetime.datetime(2020,12,31))
plt.grid()
ax.xaxis.set_major_locator(YearLocator(5)) # interval = 5
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m'))
ax.fmt_xdata = DateFormatter('%Y')
#plt.xticks(np.arange(1950, 2016, 5))
#plt.tick_params(axis='both', which='major', labelsize=14)
#plt.tick_params(axis='both', which='minor', labelsize=14)
#=================================================== 3 windows
#plt.axvspan(1950, 1980, alpha=0.2, color='teal')
#plt.axvspan(1980, 2000, alpha=0.2, color='teal')
#plt.axvspan(2000, 2016, alpha=0.2, color='teal')
#=================================================== draw lines
#plt.axvline(x=2005,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
##### to plot nc file
for obs in ProjOBSnc:
infile1=OBSDIR+'/'+OBSncfile[ProjOBSnc.index(obs)]
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the time to datetime
TIME=netCDF4.num2date(infile.variables['time'][:],\
infile.variables['time'].units,\
calendar=infile.variables['time'].calendar)
#TIME=[t.year for t in TIME]
#TIME=[t.strftime("%Y-%m") for t in TIME]
#TIME=mpl.dates.date2num(TIME)
# read the variable
SSR=infile.variables[VARIABLE[ProjOBSnc.index(obs)]][:,0,0].copy()
#=================================================== to plot
print "======== to plot =========="
print 'NO. of year:',len(TIME)
print TIME
print SSR
#plt.plot_date(mdates.datestr2num(TIME),SSR)
plt.plot(TIME,SSR,\
linestyles[ProjOBSnc.index(obs)],\
label=obs,\
color=COLORtar[ProjOBSnc.index(obs)],\
linewidth=2)
##### to plot the text file:
for obs in ProjOBStxt:
infile1=OBSDIR+'/'+OBStxtfile[ProjOBStxt.index(obs)]
print('the file is == ' +infile1)
TIME,SSR= np.loadtxt(infile1, unpack=True,\
converters={ 0: mdates.strpdate2num('%Y-%m')})
print " plotting text file ================== "
print TIME
print SSR
plt.plot(TIME,SSR,\
linestyles[ProjOBStxt.index(obs)+len(ProjOBSnc)],\
label=obs,\
color=COLORtar[ProjOBStxt.index(obs)+len(ProjOBSnc)],\
linewidth=2)
#===================================================
## plot the RegCM result driven by Had model
infile1=OBSDIR+'/rsds.Had_hist.SRF.year.mean.1996-2005.SA.fldmean.nc'
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
#define the TIME
date1 = datetime.datetime( 1996, 7, 1)
date2 = datetime.datetime( 2005, 7, 1)
delta = datetime.timedelta(days=365)
TIME = drange(date1, date2, delta)
# read the variable
SSR=infile.variables['rsds'][:,0,0].copy()
#=================================================== to plot
print "======== to plot =========="
print 'NO. of year:',len(TIME)
print TIME
print SSR
#plt.plot_date(mdates.datestr2num(TIME),SSR)
plt.plot(TIME,SSR,\
linestyles[10],\
label='Had+RegCM',\
color=COLORtar[10],\
linewidth=3)
#===================================================
### plot the HadGEM2-ES data
infile1=OBSDIR+'/rsds_Amon_HadGEM2-ES_historical-rcp85_r1i1p1_196101-209912.swiomean.yearmean.nc'
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
#define the TIME
date1 = datetime.datetime( 1961, 7, 1)
date2 = datetime.datetime( 2099, 7, 1)
delta = datetime.timedelta(days=365)
TIME = drange(date1, date2, delta)
# read the variable
SSR=infile.variables['rsds'][:,0,0].copy()
#=================================================== to plot
print "======== to plot =========="
print 'NO. of year:',len(TIME)
print TIME
print SSR
#plt.plot_date(mdates.datestr2num(TIME),SSR)
plt.plot(TIME,SSR,\
linestyles[10],\
label='HadGEM2-ES',\
color=COLORtar[12],\
linewidth=3)
fig.autofmt_xdate()
plt.legend(loc=2)
plt.show()
quit()
| gpl-3.0 |
ithemal/Ithemal | common/common_libs/graphs.py | 1 | 1592 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import utilities as ut
import numpy as np
import random
def plot_histogram(filename, values, maxvalue, xlabel, ylabel, title):
plt.figure()
plt.hist(values, bins=maxvalue, range=(0,maxvalue), edgecolor='black', linewidth=0.3)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.savefig(filename, bbox_inches='tight')
plt.close()
def plot_line_graphs(filename, losses, legend, ylabel='loss', xlabel='batch', title='Learning Curves', xmin = None, xmax = None, ymin = None, ymax = None):
plt.figure()
for loss, label in zip(losses, legend):
y = loss
x = np.arange(len(loss))
h = plt.plot(x,y, '.-', linewidth=1, markersize=2, label=label)
plt.legend()
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
cur_xmin, cur_xmax = plt.xlim()
cur_ymin, cur_ymax = plt.ylim()
if xmin != None and cur_xmin < xmin:
plt.xlim(xmin = xmin)
if ymin != None and cur_ymin < ymin:
plt.ylim(ymin = ymin)
if xmax != None and cur_xmax > xmax:
plt.xlim(xmax = xmax)
if ymax != None and cur_ymax > ymax:
plt.ylim(ymax = ymax)
plt.savefig(filename)
plt.close()
if __name__ == '__main__':
ys = []
labels = ['graph1', 'graph2']
for _ in range(2):
y = []
for i in range(random.randint(1,100)):
y.append(random.randint(0,100))
ys.append(y)
plot_line_graphs('test.png',ys,labels, xmin=0, xmax=50, ymin=0, ymax=40)
| mit |
IshankGulati/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 42 | 4843 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - PCA using randomized SVD',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
# Plot an image representing the pixelwise variance provided by the
# estimator e.g its noise_variance_ attribute. The Eigenfaces estimator,
# via the PCA decomposition, also provides a scalar noise_variance_
# (the mean of pixelwise variance) that cannot be displayed as an image
# so we skip it.
if (hasattr(estimator, 'noise_variance_') and
estimator.noise_variance_.ndim > 0): # Skip the Eigenfaces case
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
fovtran/pyoptools | pyoptools/misc/pmisc/misc.py | 9 | 18011 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as N
from numpy import array, sin, cos, float64, dot, float_, sqrt, ceil, floor, dot, \
meshgrid, zeros, zeros_like, where, nan, pi, isnan, nonzero, rint, \
linspace, arange, argwhere
from numpy.ma import is_masked, MaskedArray
from numpy.ma import array as ma_array
#from enthought.traits.api import Trait, TraitHandler
from scipy import interpolate
from pylab import griddata, meshgrid
'''Auxiliary functions and classes
'''
#~ class TraitUnitVector(TraitHandler):
#~ ''' Class to define unit vector trait
#~
#~ Description:
#~
#~ This class defines a unit vector. If the value assigned is not a unit
#~ vector, it gets automatically normalized
#~ '''
#~
#~ def validate(self, object, name, value):
#~ try:
#~ avalue=array(value)
#~ except:
#~ self.error(object, name, value)
#~
#~ if len(avalue.shape)!=1 or avalue.shape[0]!=3:
#~ return self.error(object, name, avalue)
#~
#~ avalue=array(avalue/sqrt(dot(avalue,avalue)))
#~ return avalue
#~
#~ # Trait to define a unit vector based on the unit vector trait
#~ UnitVector = Trait(array([0,0,1], float_),TraitUnitVector())
#~ print "Nota: Hay que revisar las convenciones de las rotaciones para que queden\n\r "\
#~ "consistentes en x,y,z. Me parece que hay un error en el signo de la \n\r rotacion"\
#~ "al rededor de alguno de los ejes. Modulo misc \n\r"\
#~ "si no estoy mal el error esta en la rotacion respecto a los ejez Y y Z"
def rot_x(tx):
'''Returns the transformation matrix for a rotation around the X axis
'''
return array([[1.,0. ,0. ],
[0.,cos(tx),-sin(tx)],
[0.,sin(tx), cos(tx)]]).astype(float64)
def rot_y(ty):
'''Returns the transformation matrix for a rotation around the Y axis
'''
return array([[ cos(ty),0. ,sin(ty) ],
[ 0. ,1 ,0. ],
[-sin(ty),0. ,cos(ty) ]]).astype(float64)
def rot_z(tz):
'''Returns the transformation matrix for a rotation around the Z axis
'''
return array([[ cos(tz),-sin(tz),0. ],
[ sin(tz), cos(tz),0. ],
[ 0. ,0. ,1. ]]).astype(float64)
#~ def rot_mat(r):
#~ '''Returns the transformation matrix for a rotation around the Z,Y,X axes
#~
#~ The rotation is made first around the Z axis, then around the Y axis, and
#~ finally around the X axis.
#~
#~ Parameters
#~
#~ r= (rx,ry,rz)
#~ '''
#~
#~ c=cos(r)
#~ s=sin(r)
#~
#~ rx=array([[1. , 0., 0.],
#~ [0. , c[0],-s[0]],
#~ [0. , s[0], c[0]]])
#~
#~ ry=array([[ c[1], 0., s[1]],
#~ [ 0., 1., 0.],
#~ [-s[1], 0., c[1]]])
#~
#~
#~ rz=array([[ c[2],-s[2], 0.],
#~ [ s[2], c[2], 0.],
#~ [ 0., 0., 1.]])
#~
#~
#~ tm=dot(rz,dot(ry,rx))
#~
#~ return tm
# To improve speed, this routine was moved to cmisc.pyx
#~ def rot_mat_i(r):
#~ '''Returns the inverse transformation matrix for a rotation around the Z,Y,X axes
#~
#~ Parameters
#~
#~ r= (rx,ry,rz)
#~ '''
#~
#~ c=cos(r)
#~ s=sin(r)
#~
#~ rx=array([[ 1., 0., 0.],
#~ [ 0., c[0], s[0]],
#~ [ 0.,-s[0], c[0]]])
#~
#~ ry=array([[ c[1], 0.,-s[1]],
#~ [ 0., 1., 0.],
#~ [ s[1], 0., c[1]]])
#~
#~
#~ rz=array([[ c[2], s[2], 0.],
#~ [-s[2], c[2], 0.],
#~ [ 0., 0., 1.]])
#~
#~ # Note: an optimization attempt was made by writing out the dot product expression
#~ # explicitly, and the result was considerably slower; this needs to be reviewed
#~
#~
#~ return dot(rx,dot(ry,rz))
def cross(a,b):
'''3D vector (cross) product'''
x1,y1,z1=a
x2,y2,z2=b
return array((y1*z2-y2*z1,x2*z1-x1*z2,x1*y2-x2*y1))
def wavelength2RGB(wl):
'''Function to approximate an RGB tuple from a wavelength value
Parameter:
wavelength wavelength in um
if the wavelength is outside the visible spectrum returns (0,0,0)
Original code found at:
http://www.physics.sfasu.edu/astro/color/spectra.html
'''
R,G,B=0.,0.,0.
if (wl>=.380) & (wl<.440):
R = -1.*(wl-.440)/(.440-.380)
G = 0.
B = 1.
if (wl>=.440) & (wl<.490):
R = 0.
G = (wl-.440)/(.490-.440)
B = 1.
if (wl>=.490) & (wl<.510):
R = 0.
G = 1.
B = -1.*(wl-.510)/(.510-.490)
if (wl>=.510) & (wl<.580):
R = (wl-.510)/(.580-.510)
G = 1.
B = 0.
if (wl>=.580) & (wl<.645):
R = 1.
G = -1.*(wl-.645)/(.645-.580)
B = 0.
if (wl>=.645) & (wl < .780):
R = 1.
G = 0.
B = 0.
# LET THE INTENSITY FALL OFF NEAR THE VISION LIMITS
if (wl>=.700):
sss =.3+.7* (.780-wl)/(.780-.700)
elif (wl < .420) :
sss=.3+.7*(wl-.380)/(.420-.380)
else :
sss=1
R=R*sss
G=G*sss
B=B*sss
return (R,G,B)
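# Illustrative example (added note, not part of the original module): wavelengths are
# expressed in micrometers, so green light at 550 nm is passed as 0.550 and yields an
# (R, G, B) triple with components in [0, 1]; values outside ~0.380-0.780 um give (0, 0, 0).
#
#   r, g, b = wavelength2RGB(0.550)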
def matrix_interpolation(M, i, j, type="bilinear"):
"""Returns the interpolated value of a matrix, when the indices i,j are floating
point numbers.
M
Matrix to interpolate
i,j
Indices to interpolate
type
Interpolation type. supported types: nearest,bilinear
"""
mi, mj=M.shape
if i<0 or i>mi-2 or j<0 or j>mj-2:
raise IndexError("matrix Indexes out of range")
# Allowed interpolation types
inter_types=["nearest","bilinear", ]
if not type in inter_types:
raise ValueError("Interpolation type not allowed. The allowed types"\
" are: {0}".format(inter_types))
if type=="nearest":
iri=int(round(i))
irj=int(round(j))
return M[iri, irj]
elif type=="bilinear":
i_s, j_s=floor((i, j))
#calc 1
m=M[i_s:i_s+2, j_s:j_s+2]
iv=array([1-(i-i_s), i-i_s])
jv=array([[1-(j-j_s),], [j-j_s, ]])
return dot(iv, dot(m, jv))[0]
#dx=i-i_s
#dy=j-j_s
##print i, j, i_s, j_s, dx, dy
#p1=dx*dy*M[i_s, j_s]
#p2=(1.-dx)*dy*M[i_s+1, j_s]
#p3=dx*(1.-dy)*M[i_s, j_s+1]
#p4=(1.-dx)*(1.-dy)*M[i_s+1, j_s+1]
#return p1+ p2+ p3+ p4
print "error"
return 1.
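# Illustrative example (added note, not part of the original module): bilinear
# interpolation at a fractional index mixes the four surrounding samples.
#
#   M = array([[0., 1., 2.],
#              [3., 4., 5.],
#              [6., 7., 8.]])
#   matrix_interpolation(M, 0.5, 0.5)             # bilinear -> 2.0
#   matrix_interpolation(M, 0.4, 0.7, "nearest")  # nearest sample -> M[0, 1] = 1.0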
def hitlist2int(x, y, z, xi, yi):
"""Function that estimates an intensity distribution on a plane from a
ray hitlist
"""
import matplotlib.delaunay as delaunay
from pylab import griddata, meshgrid
from scipy import interpolate
#if xi.ndim != yi.ndim:
# raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
#if xi.ndim != 1 and xi.ndim != 2:
# raise TypeError("inputs xi and yi must be 1D or 2D.")
#if not len(x)==len(y)==len(z):
# raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
#if hasattr(z,'mask'):
# x = x.compress(z.mask == False)
# y = y.compress(z.mask == False)
# z = z.compressed()
#if xi.ndim == 1:
# xi,yi = meshgrid(xi,yi)
#triangulate data
tri=delaunay.Triangulation(x, y)
#calculate triangles area
ntriangles=tri.circumcenters.shape[0]
coord=array(zip(tri.x, tri.y))
#I=zeros((ntriangles, ))
#xc=zeros((ntriangles, ))
#yc=zeros((ntriangles, ))
# for i in range(ntriangles):
# i1, i2, i3=tri.triangle_nodes[i]
# p1=coord[i1]
# p2=coord[i2]
# p3=coord[i3]
# v1=p1-p2
# v2=p3-p2
# I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0]))
# # the circumcenter data from the triangulation, has some problems so we
# # recalculate it
# xc[i], yc[i]=(p1+p2+p3)/3.
# The previous code was replaced by the following code
###
i1=tri.triangle_nodes[:, 0]
i2=tri.triangle_nodes[:, 1]
i3=tri.triangle_nodes[:, 2]
p1=coord[i1]
p2=coord[i2]
p3=coord[i3]
v1=p1-p2
v2=p3-p2
I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0]))
c=(p1+p2+p3)/3.
xc=c[:, 0]
yc=c[:, 1]
###
# Because of the triangulation algorithm, there are some really high values
# in the intensity data. To filter these values, remove the 5% points of the
# higher intensity.
ni=int(0.1*len(I))
j=I.argsort()[:-ni]
xc=xc[j]
yc=yc[j]
I=I[j]
I=I/I.max()
# #print tri.circumcenters[:, 0]
# #print tri.circumcenters.shape
# print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape
#itri=delaunay.Triangulation(xc,yc)
#inti=itri.linear_interpolator(I)
#xi,yi = meshgrid(xi,yi)
#d1=itri(xi, yi)
#Interpolation with splines
#di=interpolate.SmoothBivariateSpline(xc, yc, I)
#d1=di(xi,yi)
#Nearest-neighbour (nn) interpolation, and pupil generation
xi,yi = meshgrid(xi,yi)
d1=griddata(xc, yc, I,xi, yi )
return d1
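# Illustrative usage sketch (added note, not part of the original module): estimate an
# intensity map on a 256x256 grid from ray-impact coordinates; `hx`, `hy`, `hz` are
# assumed to be 1D arrays taken from a hitlist.
#
#   gx = linspace(hx.min(), hx.max(), 256)
#   gy = linspace(hy.min(), hy.max(), 256)
#   intensity = hitlist2int(hx, hy, hz, gx, gy)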
def hitlist2int_list(x, y):
"""Function that estimates an intensity distribution on a plane from a
ray hitlist. Returns the intensity samples as an x,y,I list
"""
import matplotlib.delaunay as delaunay
from pylab import griddata, meshgrid
from scipy import interpolate
#if xi.ndim != yi.ndim:
# raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
#if xi.ndim != 1 and xi.ndim != 2:
# raise TypeError("inputs xi and yi must be 1D or 2D.")
#if not len(x)==len(y)==len(z):
# raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
#if hasattr(z,'mask'):
# x = x.compress(z.mask == False)
# y = y.compress(z.mask == False)
# z = z.compressed()
#if xi.ndim == 1:
# xi,yi = meshgrid(xi,yi)
#triangulate data
tri=delaunay.Triangulation(x, y)
#calculate triangles area
ntriangles=tri.circumcenters.shape[0]
coord=array(zip(tri.x, tri.y))
#I=zeros((ntriangles, ))
#xc=zeros((ntriangles, ))
#yc=zeros((ntriangles, ))
# for i in range(ntriangles):
# i1, i2, i3=tri.triangle_nodes[i]
# p1=coord[i1]
# p2=coord[i2]
# p3=coord[i3]
# v1=p1-p2
# v2=p3-p2
# I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0]))
# # the circumcenter data from the triangulation, has some problems so we
# # recalculate it
# xc[i], yc[i]=(p1+p2+p3)/3.
# The previous code was replaced by the following code
###
i1=tri.triangle_nodes[:, 0]
i2=tri.triangle_nodes[:, 1]
i3=tri.triangle_nodes[:, 2]
p1=coord[i1]
p2=coord[i2]
p3=coord[i3]
v1=p1-p2
v2=p3-p2
I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0]))
c=(p1+p2+p3)/3.
xc=c[:, 0]
yc=c[:, 1]
###
# Because of the triangulation algorithm, there are some really high values
# in the intensity data. To filter these values, remove the 10% of points with the
# highest intensity.
ni=int(0.1*len(I))
j=I.argsort()[:-ni]
xc=xc[j]
yc=yc[j]
I=I[j]
I=I/I.max()
# #print tri.circumcenters[:, 0]
# #print tri.circumcenters.shape
# print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape
#itri=delaunay.Triangulation(xc,yc)
#inti=itri.linear_interpolator(I)
#xi,yi = meshgrid(xi,yi)
#d1=itri(xi, yi)
#Interpolation with splines
#di=interpolate.SmoothBivariateSpline(xc, yc, I)
#d1=di(xi,yi)
return xc,yc,I
def unwrapv(inph,in_p=(), uv=2*pi):
"""Return the input matrix unwraped the value given in uv
This is a vectorized routine, but is not as fast as it should
"""
if not is_masked(inph):
fasei=MaskedArray(inph, isnan(inph))
else:
fasei=inph.copy()
size=fasei.shape
nx, ny=size
# If the initial unwrapping point is not given, take the center of the image
# as initial coordinate
if in_p==():
in_p=(int(size[0]/2),int(size[1]/2))
# Create a temporal space to mark if the points are already unwrapped
# 0 the point has not been unwrapped
# 1 the point has not been unwrapped, but it is in the unwrapping list
# 2 the point was already unwrapped
fl=N.zeros(size)
# List containing the points to unwrap
l_un=[in_p]
fl[in_p]=1
# unwrapped values
faseo=fasei.copy()
XI_, YI_= meshgrid(range(-1, 2), range(-1, 2))
XI_=XI_.flatten()
YI_=YI_.flatten()
while len(l_un)>0:
# remove the first value from the list
unp=l_un.pop(0)
#l_un[0:1]=[]
XI=XI_+unp[0]
YI=YI_+unp[1]
#Remove from the list the values where XI is negative
nxi=XI>-1
nyi=YI>-1
nxf=XI<nx
nyf=YI<ny
n=nonzero(nxi& nyi & nxf & nyf)
lco=zip(XI[n], YI[n])
# Put the coordinates of the neighbors that have not been unwrapped yet in the list
# And check for wrapping
nv=0
wv=0
for co in lco:
if (fl[co]==0) & (faseo.mask[co]==False):
fl[co]=1
l_un.append(co)
elif fl[co]==2:
wv=wv+rint((faseo[co]-faseo[unp])/uv)
nv=nv+1
if nv!=0:
wv=wv/nv
#if wv>=0: wv=int(wv+0.5)
#else: wv=int(wv-0.5)
fl[unp]=2
faseo[unp]=faseo[unp]+wv*uv
return faseo
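# Illustrative example (added note, not part of the original module): unwrap a synthetic
# phase map that has been wrapped into (-pi, pi]; the seed point defaults to the center.
#
#   yy, xx = meshgrid(linspace(-1, 1, 64), linspace(-1, 1, 64))
#   true_phase = 12. * (xx**2 + yy**2)           # smooth phase spanning more than 2*pi
#   wrapped = (true_phase + pi) % (2 * pi) - pi  # wrapped into (-pi, pi]
#   recovered = unwrapv(wrapped)                 # true_phase up to a constant 2*pi multiple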
def unwrap_py(inph,in_p=(), uv=2*pi):
"""Return the input matrix unwraped the valu given in uv
The same as unwrapv, but using for-s, written in python
"""
if not is_masked(inph):
fasei=MaskedArray(inph, isnan(inph))
else:
fasei=inph
nx, ny=(fasei.shape[0],fasei.shape[1])
# If the initial unwrapping point is not given, take the center of the image
# as initial coordinate
if in_p==():
in_p=(int(nx/2),int(ny/2))
# Create a temporal space to mark if the points are already unwrapped
# 0 the point has not been unwrapped
# 1 the point has not been unwrapped, but it is in the unwrapping list
# 2 the point was already unwrapped
fl=zeros((nx, ny))
# List containing the points to unwrap
l_un=[in_p]
fl[in_p]=1
# unwrapped values
faseo=fasei.copy()
while len(l_un)>0:
# remove the first value from the list
cx, cy=l_un.pop(0)
# Put the coordinates of the neighbors that have not been unwrapped yet in the list
# And check for wrapping
nv=0
wv=0
for i in range(cx-1, cx+2):
for j in range(cy-1, cy+2):
if (i>-1) and (i<nx) and (j>-1) and (j<ny):
if (fl[i, j]==0)&(faseo.mask[i, j]==False):
fl[i, j]=1
l_un.append((i, j))
elif fl[i, j]==2:
wv=wv+rint((faseo[i, j]-faseo[cx, cy])/uv)
nv=nv+1
if nv!=0:
wv=wv/nv
fl[cx, cy]=2
faseo[cx, cy]=faseo[cx, cy]+wv*uv
return faseo
def interpolate_g(xi,yi,zi,xx,yy,knots=10, error=False,mask=None):
"""Create a grid of zi values interpolating the values from xi,yi,zi
xi,yi,zi 1D Lists or arrays containing the values to use as base for the interpolation
xx,yy 1D vectors or lists containing the output coordinates
mask optional mask; if given, the output is returned as a masked array
knots number of knots to be used in each direction
error if set to true, half of the points (x, y, z) are used to create
the interpolation, and half are used to evaluate the interpolation error
"""
xi=array(xi)
yi=array(yi)
zi=array(zi)
#print xi
#print yi
#print zi
assert xi.ndim==1 ,"xi must be a 1D array or list"
assert yi.ndim==1 ,"yi must be a 1D array or list"
assert zi.ndim==1 ,"zi must be a 1D array or list"
assert xx.ndim==1 ,"xx must be a 1D array or list"
assert yy.ndim==1 ,"yy must be a 1D array or list"
assert len(xi)==len(yi) and len(xi)==len(zi), "xi, yi, zi must have the same number of items"
if error==True:
# Create a list of indexes to be able to select the points that are going
# to be used as spline generators, and as control points
idx=where(arange(len(xi)) %2 ==0, False, True)
# Use only half of the samples to create the Spline,
if error == True:
isp=argwhere(idx==True)
ich=argwhere(idx==False)
xsp=xi[isp]
ysp=yi[isp]
zsp=zi[isp]
xch=xi[ich]
ych=yi[ich]
zch=zi[ich]
else:
xsp=xi
ysp=yi
zsp=zi
#Distribute homogeneously the knots
xk=linspace(xsp.min(), xsp.max(),knots)
yk=linspace(ysp.min(), ysp.max(),knots)
# LSQBivariateSpline using some knots gives smaller error than
# SmoothBivariateSpline
di=interpolate.LSQBivariateSpline(xsp, ysp, zsp, xk[1:-1], yk[1:-1])
#print xsp,ysp,zsp
#di=interpolate.SmoothBivariateSpline(xsp, ysp, zsp)
# Evaluate error
if error==True:
zch1=di.ev(xch, ych)
er=(zch.flatten()-zch1).std()
if mask==None:
#d=griddata(xi, yi, zi, xx, yy) #
d=di(xx,yy).transpose()
else:
d=ma_array(di(xx,yy).transpose(), mask=mask)
if error==True: return d, er
else: return d
####### End of auxiliary functions
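# Illustrative usage sketch (added note, not part of the original module): interpolate
# scattered samples onto a regular grid and estimate the interpolation error; `xs`,
# `ys`, `zs` are assumed to be 1D arrays of sample coordinates and values.
#
#   gx = linspace(xs.min(), xs.max(), 128)
#   gy = linspace(ys.min(), ys.max(), 128)
#   grid, err = interpolate_g(xs, ys, zs, gx, gy, knots=10, error=True)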
| bsd-3-clause |
jskew/gnuradio | gr-utils/python/utils/plot_psd_base.py | 75 | 12725 | #!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.psdfftsize = options.psd_size
self.specfftsize = options.spec_size
self.dospec = options.enable_spec # if we want to plot the spectrogram
self.datatype = getattr(scipy, datatype) #scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = scipy.array(self.sp_iq.get_xlim())
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % self.position)
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
return False
else:
# retesting length here as newer version of scipy does not throw a MemoryError, just
# returns a zero-length array
if(len(self.iq) > 0):
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.iq_psd, self.freq = self.dopsd(self.iq)
return True
else:
print "End of File"
return False
def dopsd(self, iq):
''' Need to do this here and plot later so we can do the fftshift '''
overlap = self.psdfftsize/4
winfunc = scipy.blackman
psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.psdfftsize),
noverlap = overlap)
psd = 10.0*log10(abs(psd))
return (psd, freq)
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
specdims = [0.2, 0.125, 0.6, 0.3]
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for PSD plot
self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
r = self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time(self.time, self.iq) # draw the plot
self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
self.draw_psd(self.freq, self.iq_psd) # draw the plot
if self.dospec:
# Subplot for spectrogram plot
self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.draw_spec(self.time, self.iq)
draw()
def draw_time(self, t, iq):
reals = iq.real
imags = iq.imag
self.plot_iq[0].set_data([t, reals])
self.plot_iq[1].set_data([t, imags])
self.sp_iq.set_xlim(t.min(), t.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_psd(self, f, p):
self.plot_psd[0].set_data([f, p])
self.sp_psd.set_ylim([p.min()-10, p.max()+10])
self.sp_psd.set_xlim([f.min(), f.max()])
def draw_spec(self, t, s):
overlap = self.specfftsize/4
winfunc = scipy.blackman
self.sp_spec.clear()
self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.specfftsize),
noverlap = overlap, xextent=[t.min(), t.max()])
def update_plots(self):
self.draw_time(self.time, self.iq)
self.draw_psd(self.freq, self.iq_psd)
if self.dospec:
self.draw_spec(self.time, self.iq)
self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
#xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))
iq = scipy.array(self.iq[xmin : xmax])
time = scipy.array(self.time[xmin : xmax])
iq_psd, freq = self.dopsd(iq)
self.draw_psd(freq, iq_psd)
self.xlim = scipy.array(self.sp_iq.get_xlim())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
r = self.get_data()
if(r):
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
r = self.get_data()
if(r):
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."
parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=8192,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
parser.add_option("", "--psd-size", type="int", default=1024,
help="Set the size of the PSD FFT [default=%default]")
parser.add_option("", "--spec-size", type="int", default=256,
help="Set the size of the spectrogram FFT [default=%default]")
parser.add_option("-S", "--enable-spec", action="store_true", default=False,
help="Turn on plotting the spectrogram [default=%default]")
return parser
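# Illustrative invocation (added note, not part of the original script), using only the
# options defined in setup_options above; the capture file name is a placeholder:
#
#   ./plot_psd_base.py -d complex64 -B 8192 -R 1M --psd-size 1024 -S capture.dat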
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
def main():
parser = plot_psd_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
codesociety/friartuck | friartuck/alphavantage/alphavantage.py | 1 | 7326 | import json
from datetime import timedelta, datetime
import pandas as pd
import urllib.request
class AlphaVantage(object):
def __init__(self, apikey):
self.apikey = apikey
pass
def get_quote_daily(self, symbol, bars=22):
output_size = "compact"
if bars > 100:
output_size = 'full'
url = "https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&datatype=json&outputsize=%s&apikey=%s" % (symbol.lower(), output_size, self.apikey)
print(url)
quote_bars = None
with urllib.request.urlopen(url) as response:
content = response.read()
# resp, content = self.client.request(url, "GET")
# print(content)
data = json.loads(content.decode('utf-8'))
# print(data)
if "Time Series (Daily)" in data:
quotes = data["Time Series (Daily)"]
# print(quotes)
print(len(quotes))
for date_str in quotes:
quote_data = quotes[date_str]
quote_date = datetime.strptime(date_str, "%Y-%m-%d")
bar = pd.DataFrame(index=pd.DatetimeIndex([quote_date]),
data={'price': float(quote_data['4. close']),
'open': float(quote_data['1. open']),
'high': float(quote_data['2. high']),
'low': float(quote_data['3. low']),
'close': float(quote_data['4. close']),
'volume': int(quote_data['5. volume']),
'date': quote_date})
# print(close)
if quote_bars is None:
quote_bars = bar
else:
quote_bars = bar.append(quote_bars)
if quote_bars is None:
# log.warn("Unexpected, could not retrieve quote for security (%s) " % symbol)
# bars = pd.DataFrame(index=[6], columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'])
quote_date = datetime.now()
quote_date = quote_date.replace(second=0, microsecond=0)
            # Build an empty placeholder frame and assign it to quote_bars so
            # the sort_index/tail calls below do not fail when no data returned.
            quote_bars = pd.DataFrame(index=pd.DatetimeIndex([quote_date]), columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'],
data={'price': float("nan"),
'open': float("nan"),
'high': float("nan"),
'low': float("nan"),
'close': float("nan"),
'volume': int(0),
'date': quote_date})
# print(bars)
quote_bars.sort_index(inplace=True)
return quote_bars.tail(bars)
def get_quote_intraday(self, symbol, since_last_quote_time, interval='5min'):
if not since_last_quote_time:
since_last_quote_time = datetime.now().replace(hour=8, minute=25, second=0, microsecond=0)
output_size = "compact"
if interval == '1min':
start_time = since_last_quote_time
end_time = start_time.replace(hour=15, minute=0, second=0, microsecond=0)
now_time = datetime.now().replace(second=0, microsecond=0)
if now_time < end_time:
end_time = now_time
diff = (end_time - start_time).seconds / 60
print("diff: %s, start(%s) end(%s)" % (diff, start_time, end_time))
if diff > 100:
output_size = 'full'
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=%s&datatype=json&outputsize=%s&interval=%s&apikey=%s" % (symbol.lower(), output_size, interval, self.apikey)
print(url)
quote_bars = None
with urllib.request.urlopen(url) as response:
content = response.read()
# resp, content = self.client.request(url, "GET")
# print(content)
data = json.loads(content.decode('utf-8'))
# print(data)
connected = False
# quote_dates = []
time_series_key = "Time Series (%s)" % interval
if time_series_key in data:
quotes = data[time_series_key]
# print(quotes)
print("since_last_quote_time(%s), returned length(%s)" % (since_last_quote_time, len(quotes)))
if len(quotes) > 0:
connected = True
for date_str in quotes:
quote_data = quotes[date_str]
minute_adjust = 0
"""
if interval != "1min":
minute_adjust = int(interval.replace("min", ""))
"""
quote_date = datetime.strptime(date_str, "%Y-%m-%d %H:%M:00") - timedelta(hours=1, minutes=minute_adjust)
# quote_dates.append(quote_date.strftime("%Y-%m-%d %H:%M:00"))
if since_last_quote_time >= quote_date:
continue
bar = pd.DataFrame(index=pd.DatetimeIndex([quote_date]), columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date', 'connected'],
data={'price': float(quote_data['4. close']),
'open': float(quote_data['1. open']),
'high': float(quote_data['2. high']),
'low': float(quote_data['3. low']),
'close': float(quote_data['4. close']),
'volume': int(quote_data['5. volume']),
'date': quote_date,
'connected': connected})
# print(close)
if quote_bars is None:
quote_bars = bar
else:
quote_bars = bar.append(quote_bars)
# print("quote_dates %s" % quote_dates)
if quote_bars is None:
# log.warn("Unexpected, could not retrieve quote for security (%s) " % symbol)
# bars = pd.DataFrame(index=[6], columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date'])
quote_date = datetime.now()
quote_date = quote_date.replace(second=0, microsecond=0)
quote_bars = pd.DataFrame(index=pd.DatetimeIndex([quote_date]), columns=['price', 'open', 'high', 'low', 'close', 'volume', 'date', 'connected'],
data={'price': float("nan"),
'open': float("nan"),
'high': float("nan"),
'low': float("nan"),
'close': float("nan"),
'volume': int(0),
'date': quote_date,
'connected': connected})
# print(bars)
quote_bars.sort_index(inplace=True)
return quote_bars
def is_valid_value(value, default):
if value != -1:
return value
return default
| mit |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/factorization/python/ops/gmm_test.py | 41 | 8716 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ops.gmm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.factorization.python.ops import gmm as gmm_lib
from tensorflow.contrib.learn.python.learn.estimators import kmeans
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed as random_seed_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.training import queue_runner
class GMMTest(test.TestCase):
def input_fn(self, batch_size=None, points=None):
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return x, None
indices = random_ops.random_uniform(constant_op.constant([batch_size]),
minval=0, maxval=num_points-1,
dtype=dtypes.int32,
seed=10)
return array_ops.gather(x, indices), None
return _fn
def setUp(self):
np.random.seed(3)
random_seed_lib.set_random_seed(2)
self.num_centers = 2
self.num_dims = 2
self.num_points = 4000
self.batch_size = self.num_points
self.true_centers = self.make_random_centers(self.num_centers,
self.num_dims)
self.points, self.assignments = self.make_random_points(
self.true_centers, self.num_points)
# Use initial means from kmeans (just like scikit-learn does).
clusterer = kmeans.KMeansClustering(num_clusters=self.num_centers)
clusterer.fit(input_fn=lambda: (constant_op.constant(self.points), None),
steps=30)
self.initial_means = clusterer.clusters()
@staticmethod
def make_random_centers(num_centers, num_dims):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * 500)
@staticmethod
def make_random_points(centers, num_points):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * 20)
points = centers[assignments] + offsets
return (points, assignments)
def test_weights(self):
"""Tests the shape of the weights."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
weights = gmm.weights()
self.assertAllEqual(list(weights.shape), [self.num_centers])
def test_clusters(self):
"""Tests the shape of the clusters."""
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=0)
clusters = gmm.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters='random',
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=1)
score1 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
gmm.fit(input_fn=self.input_fn(), steps=10)
score2 = gmm.score(input_fn=self.input_fn(batch_size=self.num_points),
steps=1)
self.assertLess(score1, score2)
def test_infer(self):
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
random_seed=4,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=60)
clusters = gmm.clusters()
# Make a small test set
num_points = 40
points, true_assignments = self.make_random_points(clusters, num_points)
assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=num_points)):
assignments.append(item)
assignments = np.ravel(assignments)
self.assertAllEqual(true_assignments, assignments)
def _compare_with_sklearn(self, cov_type):
# sklearn version.
iterations = 40
np.random.seed(5)
sklearn_assignments = np.asarray([0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
sklearn_means = np.asarray([[144.83417719, 254.20130341],
[274.38754816, 353.16074346]])
sklearn_covs = np.asarray([[[395.0081194, -4.50389512],
[-4.50389512, 408.27543989]],
[[385.17484203, -31.27834935],
[-31.27834935, 391.74249925]]])
# skflow version.
gmm = gmm_lib.GMM(self.num_centers,
initial_clusters=self.initial_means,
covariance_type=cov_type,
config=run_config.RunConfig(tf_random_seed=2))
gmm.fit(input_fn=self.input_fn(), steps=iterations)
points = self.points[:10, :]
skflow_assignments = []
for item in gmm.predict_assignments(
input_fn=self.input_fn(points=points, batch_size=10)):
skflow_assignments.append(item)
self.assertAllClose(sklearn_assignments,
np.ravel(skflow_assignments).astype(int))
self.assertAllClose(sklearn_means, gmm.clusters())
if cov_type == 'full':
self.assertAllClose(sklearn_covs, gmm.covariances(), rtol=0.01)
else:
for d in [0, 1]:
self.assertAllClose(
np.diag(sklearn_covs[d]), gmm.covariances()[d, :], rtol=0.01)
def test_compare_full(self):
self._compare_with_sklearn('full')
def test_compare_diag(self):
self._compare_with_sklearn('diag')
def test_random_input_large(self):
# sklearn version.
iterations = 5 # that should be enough to know whether this diverges
np.random.seed(5)
num_classes = 20
x = np.array([[np.random.random() for _ in range(100)]
for _ in range(num_classes)], dtype=np.float32)
# skflow version.
gmm = gmm_lib.GMM(num_classes,
covariance_type='full',
config=run_config.RunConfig(tf_random_seed=2))
def get_input_fn(x):
def input_fn():
return constant_op.constant(x.astype(np.float32)), None
return input_fn
gmm.fit(input_fn=get_input_fn(x), steps=iterations)
self.assertFalse(np.isnan(gmm.clusters()).any())
class GMMTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(capacity=10,
dtypes=dtypes.float32,
shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue,
[enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
gmm = gmm_lib.GMM(2, covariance_type='diag')
gmm.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
CallaJun/hackprince | indico/matplotlib/table.py | 11 | 17551 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from .cbook import is_string_like
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor)
self.set_clip_on(False)
# Create text object
if loc is None:
loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
        'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
@allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently support 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l, b, w, h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
    Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best': 0,
'upper right': 1, # default
'upper left': 2,
'lower left': 3,
'lower right': 4,
'center left': 5,
'center right': 6,
'lower center': 7,
'upper center': 8,
'center': 9,
'top right': 10,
'top left': 11,
'bottom left': 12,
'bottom right': 13,
'right': 14,
'left': 15,
'top': 16,
'bottom': 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None, **kwargs):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on '
'bottom; valid locations are\n%s\t' %
(loc, '\n\t'.join(six.iterkeys(self.codes))))
loc = 'bottom'
if is_string_like(loc):
loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self.update(kwargs)
self.set_clip_on(False)
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0, 0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return (self.FONTSIZE / 72.0 * self.figure.dpi /
self._axes.bbox.height * 1.2)
@allow_rasterization
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one
# will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible():
return
renderer.open_group('table')
self._update_positions(renderer)
keys = list(six.iterkeys(self._cells))
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in six.iterkeys(self._cells)
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.contains(mouseevent.x, mouseevent.y), {}
else:
return False, {}
def get_children(self):
'Return the Artists contained by the table'
return list(six.itervalues(self._cells))
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [cell.get_window_extent(renderer)
for cell in six.itervalues(self._cells)]
return Bbox.union(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in six.iteritems(self._cells):
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = list(six.iterkeys(widths))
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = list(six.iterkeys(heights))
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in six.iteritems(self._cells):
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
cells = []
for key, cell in six.iteritems(self._cells):
# ignore auto-sized columns
if key[1] in self._autoColumns:
continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in six.itervalues(self._cells):
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in six.itervalues(self._cells):
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in six.itervalues(self._cells):
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in six.itervalues(self._cells):
x, y = c.get_x(), c.get_y()
c.set_x(x + ox)
c.set_y(y + oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l, b, w, h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw / w, rh / h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = list(xrange(len(self.codes)))
# defaults for center
ox = (0.5 - w / 2) - l
oy = (0.5 - h / 2) - b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5 - w / 2) - l
if self._loc in (CL, CR, C): # center y
oy = (0.5 - h / 2) - b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None,
**kwargs):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
        # rows x cols grid of empty strings (indexed as cellText[row][col])
        cellText = [[''] * cols] * rows
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0 / cols] * cols
# Fill in missing information for column
# and row labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
# If we have column labels, need to shift
# the text and colour arrays down 1 row
offset = 1
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
else:
offset = 0
elif colColours is None:
colColours = 'w' * cols
    if colLabels is not None:
        assert len(colLabels) == cols
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox, **kwargs)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row + offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row + offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
docstring.interpd.update(Table=artist.kwdoc(Table))
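# A minimal usage sketch, not part of the original module: it exercises the
# `table` factory above only when this file is executed directly. The cell
# text, labels and output file name are arbitrary example values.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.set_axis_off()  # hide the axes; only the table should be visible
    table(ax,
          cellText=[['1', '2', '3'], ['4', '5', '6']],
          rowLabels=['row A', 'row B'],
          colLabels=['c1', 'c2', 'c3'],
          loc='center')
    fig.savefig('table_demo.png')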
| lgpl-3.0 |
haileybureau/analysis_scripts | explicit_energy_protein_plot.py | 1 | 2789 | #!/usr/bin/python
#
#author: Hailey Bureau
#latest edits: 2 May 2014
#
#
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from pylab import *
from matplotlib import rcParams
#with open("res10-d.dat") as f:
# data = f.readlines()
#data = data.split('\n')
'''
x = []
y = []
for row in data:
tmp=row.split()
print tmp
x.append(float(tmp[0]))
y.append(float(tmp[1]))
'''
#x = [row.split(' ')[0] for row in data]
#print x
##exit()
#y = [row.split(' ')[1] for row in data]
# FONTSIZE xx-small,x-small,small,medium,large,x-large,xx-large
fpropxxl=matplotlib.font_manager.FontProperties(size='xx-large')
fpropxl=matplotlib.font_manager.FontProperties(size='x-large')
fpropl=matplotlib.font_manager.FontProperties(size='large')
fpropm=matplotlib.font_manager.FontProperties(size='medium')
#FONTFAMILY
matplotlib.rcParams['font.family']='Times New Roman'
#to not cut off bottom axes
rcParams.update({'figure.autolayout': True})
data = np.loadtxt('explicit_energy_protein_notitles', unpack=True)
frame_number = data[0,:]
time = data[1,:]
bond = data[2,:]
angle = data[3,:]
dihedral = data[4,:]
improper = data[5,:]
electrostatic = data[6,:]
vdw = data[7,:]
conformational = data[8,:]
nonbonded = data[9,:]
total_energy = data[10,:]
#print 'total_energy', total_energy
#exit()
#a, b = np.loadtxt('pmf_NAMD_DANVT_imp_v1_n100_asmd.dat', unpack=True)
#c, d = np.loadtxt('pmf_NAMD_DANVT_exp_v1_n100_asmd.dat', unpack=True)
#e, f = np.loadtxt('cor_pmf_NAMD_DANVT_exp_v100_n100_asmd9st.dat', unpack=True)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = fig.add_subplot(111)
#ax3 = fig.add_subplot(111)
#ax4 = fig.add_subplot(111)
#exit()
#ax1.set_title("Plot title...")
ax1.set_xlabel(('Frame number'),fontproperties=fpropl)
ax1.set_ylabel(('Energy (kcal/mol)'),fontproperties=fpropl)
plt.title('Explicit solvent protein only')
plt.yticks((0,50,100,150,200,250),fontproperties=fpropm)
plt.xticks((0,100,200,300,400,500,600,700,800,900,1000),fontproperties=fpropm)
plt.ylim([0,250]) # manually define
plt.xlim([0,1000])
ax1.plot(time,total_energy,'r-',linewidth = 0.5,label='Total Energy')
ax2.plot(time, electrostatic, 'k-', linewidth = 0.5,label='Electrostatic')
#ax3.plot(time,d, 'b--',linewidth = 3.0,label='explicit 100 tps')
#ax4.plot(e,f,'g--', label='9 stages')
leg = ax1.legend(loc=2,prop={'size':10})
#leg = ax2.legend(loc=2,prop={'size':10})
#leg = ax3.legend(loc=2,prop={'size':10})
#leg = ax4.legend(loc=4)
#plt.legend(loc='upper left',prop={'size':10})
#leg = plt.gca().get_legend()
leg.draw_frame(False)
fig.set_size_inches(6.3,3.9)
plt.draw()
plt.savefig('energies_explicit_protein.eps')
#plt.savefig('mixed_vie_final.png')
#plt.savefig('mixed_vie_final.svg')
#plt.show()
| mit |
ltiao/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with additive noise. The estimated coefficients are
compared with the ground truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
mrshu/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 2 | 9937 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD Style.
import warnings
from sys import version_info
import numpy as np
from scipy import interpolate
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet
from sklearn.linear_model import LassoLarsCV
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
    When validating this against glmnet, notice that glmnet divides the
    loss by nobs.
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
    Actually, the parameter alpha = 0 should not be allowed. However,
    we test it as a border case.
    ElasticNet is tested with and without a precomputed Gram matrix.
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_path():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.026, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.026, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_enet_path():
X, y, X_test, y_test = build_dataset()
max_iter = 150
with warnings.catch_warnings():
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
warnings.simplefilter("ignore", UserWarning)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.9, 0.95], cv=3,
max_iter=max_iter)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.002, 2)
assert_equal(clf.l1_ratio_, 0.95)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.9, 0.95], cv=3,
max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.002, 2)
assert_equal(clf.l1_ratio_, 0.95)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 50
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
# Test that explicit warm restart...
clf = ElasticNet(alpha=1.0, max_iter=50)
clf.fit(X, y)
clf2 = ElasticNet(alpha=0.1, max_iter=50)
clf2.fit(X, y, coef_init=clf.coef_.copy())
#... and implicit warm restart are equivalent.
clf3 = ElasticNet(alpha=1.0, max_iter=50, warm_start=True)
clf3.fit(X, y)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.1)
clf3.fit(X, y)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_lasso_alpha_warning():
check_warnings() # Skip if unsupported Python version
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
clf.fit(X, Y)
assert_greater(len(w), 0) # warnings should be raised
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap, eps = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_, estimator.eps_)
for k in xrange(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
assert_array_almost_equal(eps[k], estimator.eps_)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
liang42hao/bokeh | examples/interactions/us_marriages_divorces/us_marriages_divorces_interactive.py | 26 | 3437 | # coding: utf-8
# Plotting U.S. marriage and divorce statistics
#
# Example code by Randal S. Olson (http://www.randalolson.com)
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.models import HoverTool, NumeralTickFormatter
from bokeh.models import SingleIntervalTicker, LinearAxis
import pandas as pd
# Since the data set is loaded in the bokeh data repository, we can do this:
from bokeh.sampledata.us_marriages_divorces import data
md_data = data.copy()
# Fill in missing data with a simple linear interpolation
md_data = md_data.interpolate(method='linear', axis=0).ffill().bfill()
# Tell Bokeh where to save the interactive chart
output_file('us_marriages_divorces_per_capita.html',
# Tell Bokeh to use its minified JavaScript hosted on a
# cdn instead of putting the Bokeh JS in the output file
# Warning: This makes it so people can only view the
# chart with an internet connection
mode='cdn',
title='144 years of marriage and divorce in the U.S.A.')
# Set up the data sources for the lines we'll be plotting.
# We need separate data sources for each line because we're
# displaying different data in the hover tool.
source_marriages = ColumnDataSource(
data=dict(
# x-axis (Years) for the chart
x=md_data.Year.values,
# y-axis (Marriages per capita) for the chart
y=md_data.Marriages_per_1000.values,
# The string version of the y-value that is displayed in the hover box
y_text=md_data.Marriages_per_1000.apply(
lambda x: '{}'.format(round(x, 1))),
# Extra descriptive text that is displayed in the hover box
desc=['marriages per 1,000 people'] * len(md_data),
)
)
source_divorces = ColumnDataSource(
data=dict(
# x-axis (Years) for the chart
x=md_data.Year.values,
# y-axis (Marriages per capita) for the chart
y=md_data.Divorces_per_1000.values,
# The string version of the y-value that is displayed in the hover box
y_text=md_data.Divorces_per_1000.apply(
lambda x: '{}'.format(round(x, 1))),
# Extra descriptive text that is displayed in the hover box
desc=['divorces and annulments per 1,000 people'] * len(md_data),
)
)
# Use HTML to mark up the tooltip that displays over the chart
# Note that the variables in the data sources (above) are referenced with a @
hover = HoverTool(
tooltips='<font face="Arial" size="3">@y_text @desc in @x</font>')
# Select the tools that will be available to the chart
TOOLS = ['pan,wheel_zoom,box_zoom,reset,save,resize'] + [hover]
bplot = figure(tools=TOOLS, width=800, height=500, x_axis_type=None)
# Create a custom x-axis with 10-year intervals
ticker = SingleIntervalTicker(interval=10, num_minor_ticks=0)
xaxis = LinearAxis(ticker=ticker)
bplot.add_layout(xaxis, 'below')
# Customize the y-axis
bplot.yaxis.formatter = NumeralTickFormatter(format='0.0a')
bplot.yaxis.axis_label = '# per 1,000 people'
# Provide a descriptive title for the chart
bplot.title = '144 years of marriage and divorce in the U.S.'
# Finally, plot the data!
# Note that the data source determines what is plotted and what shows in
# the tooltips
bplot.line('x', 'y', color='#1f77b4', line_width=3, source=source_marriages)
bplot.line('x', 'y', color='#ff7f0e', line_width=3, source=source_divorces)
show(bplot)
| bsd-3-clause |
Radiomics/pyradiomics | labs/pyradiomics-dcm/pyradiomics-dcm.py | 1 | 21572 | import argparse
import csv
from decimal import Decimal
import distutils.spawn
import glob
import json
import logging
import os
import shutil
from subprocess import call
import sys
import tempfile
import numpy
import pandas
import pydicom
from radiomics import featureextractor
scriptlogger = logging.getLogger('radiomics.dicom')
scriptlogger.setLevel(logging.DEBUG)
def dcmImageToNRRD(inputDICOMImageDir, tempDir):
scanNRRDFile = os.path.join(tempDir, "image.nrrd")
if not os.path.isfile(scanNRRDFile):
call(['plastimatch', 'convert', '--input',
inputDICOMImageDir, '--output-img', scanNRRDFile])
return scanNRRDFile
def dcmImageToNIfTI(inputDICOMImageDir, tempDir):
destScanNIfTIFile = os.path.join(tempDir, "volume.nii")
scanNIfTIFile = os.path.join(inputDICOMImageDir, "volume.nii")
scanJSONFile = os.path.join(inputDICOMImageDir, "volume.json")
# will save to volume.nii
if not os.path.isfile(destScanNIfTIFile):
cmd = ['dcm2niix', "-m", "y", "-f", "volume", inputDICOMImageDir]
call(cmd)
shutil.move(scanNIfTIFile, destScanNIfTIFile)
if os.path.isfile(scanJSONFile):
os.remove(scanJSONFile)
return destScanNIfTIFile
# individual segments will be extracted to the destination directory into NRRD
# files, with the names assigned consecutive numbers starting from 1
def dcmSEGToNRRDs(inputSEG, tempDir):
segmentsDir = os.path.join(tempDir, 'Segments')
if not os.path.isdir(segmentsDir):
os.mkdir(segmentsDir)
call(['segimage2itkimage', '--inputDICOM',
inputSEG, '--outputDirectory', segmentsDir])
return glob.glob(os.path.join(segmentsDir, "*nrrd"))
def writeSR(inputSEG, inputJSON, inputDICOMImageDir, outputSR):
cmd = [
'tid1500writer',
'--inputImageLibraryDirectory',
inputDICOMImageDir,
'--inputCompositeContextDirectory',
os.path.split(inputSEG)[0],
'--inputMetadata',
inputJSON,
'--outputDICOM',
outputSR]
scriptlogger.debug("Writing SR with: " + str(cmd))
call(cmd)
def getCTSeriesUID(imageDICOMDir):
ctFile = os.listdir(imageDICOMDir)[0]
dcm = pydicom.read_file(os.path.join(imageDICOMDir, ctFile))
return dcm.SeriesInstanceUID
class DICOMMetadataAccessor:
def __init__(self, dcmFileName):
self.dcm = pydicom.read_file(dcmFileName)
def getInstanceUID(self):
return self.dcm.SOPInstanceUID
def getSeriesDescription(self):
return self.dcm.SeriesDescription
def getSeriesInstanceUID(self):
return self.dcm.SeriesInstanceUID
class SEGMetadataAccessor(DICOMMetadataAccessor):
def __init__(self, segFileName):
DICOMMetadataAccessor.__init__(self, segFileName)
if self.dcm.SOPClassUID != '1.2.840.10008.5.1.4.1.1.66.4':
raise ValueError(
"SEGMetadataAccessor: DICOM object is not Segmentation!")
def getSegmentSegmentationTypeCode(self, segmentNumber):
try:
return self.dcm.SegmentSequence[segmentNumber].SegmentedPropertyTypeCodeSequence[0]
except BaseException:
return None
def getTrackingIdentifier(self, segmentNumber):
try:
return self.dcm.SegmentSequence[segmentNumber].TrackingIdentifier
except BaseException:
return None
def getTrackingUniqueIdentifier(self, segmentNumber):
try:
return self.dcm.SegmentSequence[segmentNumber].TrackingUID
except BaseException:
return None
def getSegmentDescription(self, segmentNumber):
try:
return self.dcm.SegmentSequence[segmentNumber].SegmentDescription
except BaseException:
return None
def getSegmentAnatomicLocationCode(self, segmentNumber):
try:
return self.dcm.SegmentSequence[segmentNumber].AnatomicRegionSequence[0]
except BaseException:
return None
class CodedValue:
def __init__(self, value, scheme, meaning):
self.codeValue = value
self.codingSchemeDesignator = scheme
self.codeMeaning = meaning
def getDict(self):
return {"CodeValue": self.codeValue, "CodeMeaning": self.codeMeaning,
"CodingSchemeDesignator": self.codingSchemeDesignator}
class TID1500Metadata:
def __init__(
self,
featuresDictFile,
seriesDescription="Radiomics features"):
self.featuresDict = self.readDictionary(featuresDictFile)
self.m = {}
self.m["@schema"] = "https://raw.githubusercontent.com/qiicr/dcmqi/master/doc/schemas/sr-tid1500-schema.json#"
self.m["SeriesDescription"] = seriesDescription
self.m["Measurements"] = []
self.measurementGroupCount = 0
def addMeasurementGroup(self):
self.measurementGroupCount = self.measurementGroupCount + 1
measurementsGroup = {}
measurementsGroup["measurementItems"] = []
measurementsGroup["ReferencedSegment"] = self.measurementGroupCount
self.m["Measurements"].append(measurementsGroup)
@staticmethod
def readDictionary(featuresDictFile):
return pandas.read_csv(featuresDictFile, sep='\t', low_memory=False)
@staticmethod
def makeHash(text, length=6):
from base64 import b64encode
from hashlib import sha1
return b64encode(sha1(str.encode(text)).digest()).decode('ascii')[:length]
def makePrivateCode(self, text):
return CodedValue(self.makeHash(text), "99PYRADIOMICS", text).getDict()
# returns None if prefix is not recognized, otherwise returns a tuple of
# (measurementModifiers, derivationParameters)
def prefix2codes(self, prefix):
modifiers = []
derivationParameters = []
import re
imageTransformationConcept = self.makePrivateCode(
"Image transformation")
if re.match("original", prefix):
pass
elif re.match("square", prefix):
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Square transformation")})
elif re.match("squareroot", prefix):
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Square root transformation")})
elif re.match("logarithm", prefix):
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Logarithm transformation")})
elif re.match("gradient", prefix):
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Gradient magnitude transformation")})
elif re.match("exponential", prefix):
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Exponent transformation")})
elif re.match("exponential", prefix):
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Exponent transformation")})
# parameterized processing operations
elif re.match(r"wavelet-([HL]{2,3})", prefix):
match = re.match(r"wavelet-([HL]{2,3})", prefix)
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Wavelet transformation")})
modifiers.append({"modifier": self.makePrivateCode("Wavelet sub-band"),
"modifierValue": self.makePrivateCode(match.group(1))})
elif re.match(r"log-sigma-([\d]+)-([\d]+)-([a-z]+)", prefix):
match = re.match(r"log-sigma-([\d]+)-([\d]+)-([a-z]+)", prefix)
units = match.group(3)
if units == "mm":
unitsCode = CodedValue("mm", "UCUM", "millimeters").getDict()
elif units == "cm":
unitsCode = CodedValue("mm", "UCUM", "centimeters").getDict()
else:
unitsCode = self.makePrivateCode(units)
modifiers.append({"modifier": imageTransformationConcept,
"modifierValue": self.makePrivateCode("Laplacian of Gaussian")})
derivationParameters.append({"derivationParameter": self.makePrivateCode("Kernel size"),
"derivationParameterValue": str('.'.join([match.group(1), match.group(2)])),
"derivationParameterUnits": unitsCode})
else:
# unknown prefix
return None
return modifiers, derivationParameters
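    # Example of the mapping above (illustrative): a pyradiomics feature named
    # "wavelet-LLH_glcm_Contrast" is split by addMeasurement() into the prefix
    # "wavelet-LLH", the feature class "glcm" and the feature name "Contrast";
    # prefix2codes() then yields an "Image transformation" modifier of
    # "Wavelet transformation" plus a "Wavelet sub-band" modifier of "LLH".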
# adds a single measurement to the last measurement group
def addMeasurement(
self,
value,
quantityCode,
unitsCode=CodedValue(
"1",
"UCUM",
"no units"
)):
if self.measurementGroupCount < 1:
scriptlogger.error(
"Cannot add measurement - no measurement groups initialized!")
return
(preprocessing, featureClass, featureName) = quantityCode.split('_')
mpTuple = self.prefix2codes(preprocessing)
if mpTuple is None:
return
measurement = {}
classSubset = self.featuresDict[self.featuresDict['pyradiomics_feature_class'] == featureClass]
featureTuple = classSubset[classSubset['pyradiomics_feature_name'] == featureName]
if featureTuple.empty:
codeMeaning = featureClass + "_" + featureName
code = self.makeHash(codeMeaning)
measurement["quantity"] = CodedValue(
code, "99PYRADIOMICS", codeMeaning).getDict()
if len(code) > 16:
scriptlogger.error("Sorry, the code value is too long!")
sys.exit()
else:
measurement["quantity"] = CodedValue(
featureTuple["IBSI_code"].values[0],
"IBSI",
featureTuple["IBSI_meaning"].values[0]).getDict()
try:
if numpy.isnan(value):
scriptlogger.info(
"Skipping NaN value for feature %s",
quantityCode)
return
except Exception as e:
scriptlogger.error("Exception checking for NaN: %s %s", str(e), value)
return
try:
measurement["value"] = '%E' % Decimal(float(value))
except Exception as e:
scriptlogger.error("Exception formatting %s as Decimal: %s", value, str(e))
scriptlogger.error("type of value: %s", type(value))
measurement["units"] = unitsCode.getDict()
self.m["Measurements"][-1]["measurementItems"].append(measurement)
if len(mpTuple[0]):
measurement["measurementModifiers"] = [m for m in mpTuple[0]]
if len(mpTuple[1]):
measurement["measurementDerivationParameters"] = [
d for d in mpTuple[1]]
return
def saveJSONToFile(self, fileName):
with open(fileName, 'w') as f:
json.dump(self.m, f, indent=2, sort_keys=True)
def main():
parser = argparse.ArgumentParser(
usage="%(prog)s --input-image <dir> --input-seg <name> --output-sr <name>\n\n"
"Warning: This is a \"pyradiomics labs\" script, which means it is an experimental feature in development!\n"
"The intent of this helper script is to enable pyradiomics feature extraction directly from/to DICOM data.\n"
"The segmentation defining the region of interest must be defined as a DICOM Segmentation image.\n"
"Support for DICOM Radiotherapy Structure Sets for defining region of interest may be added in the future.\n")
parser.add_argument(
'--input-image-dir',
dest="inputDICOMImageDir",
metavar='Input DICOM image directory',
help="Directory with the input DICOM series."
" It is expected that a single series is corresponding to a single scalar volume.",
required=True)
parser.add_argument(
'--input-seg-file',
dest="inputSEG",
metavar='Input DICOM SEG file',
help="Input segmentation defined as a"
"DICOM Segmentation object.",
required=True)
parser.add_argument(
'--output-dir',
dest="outputDir",
metavar='Directory to store the output file',
help="Directory for saving the resulting DICOM file.",
required=True)
parser.add_argument(
'--parameters',
dest="parameters",
metavar="pyradiomics extraction parameters")
parser.add_argument(
'--temp-dir',
dest="tempDir",
metavar="Temporary directory")
parser.add_argument(
'--features-dict',
dest="featuresDict",
metavar="Dictionary mapping pyradiomics feature names to the IBSI defined features.")
parser.add_argument(
'--volume-reconstructor',
dest="volumeReconstructor",
metavar="Choose the tool to be used for reconstructing image volume from the DICOM image series."
" Allowed options are plastimatch or dcm2niix (should be installed on the system). plastimatch "
"will be used by default.",
choices=['plastimatch', 'dcm2nixx'],
default="plastimatch")
args = parser.parse_args()
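    # Illustrative invocation (all paths and file names are hypothetical):
    #   python pyradiomics-dcm.py --input-image-dir ./ct_series \
    #       --input-seg-file ./tumor_seg.dcm --output-dir ./results \
    #       --parameters ./Params.yaml --features-dict ./featuresDict.tsv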
# with tempfile.mkdtemp() as tempDir:
tempDir = args.tempDir
if not tempDir:
tempDir = tempfile.mkdtemp()
scriptlogger.info("Temporary directory: " + tempDir)
# convert input DICOM series into a scalar volume
# plastimatch fails for prostate DWI Data! Need to report
# Selection of the optimal volume reconstructor may depend
# on the specific dataset!
if args.volumeReconstructor == "plastimatch":
scriptlogger.info(
"Using Plastimatch for DICOM image volume reconstruction.")
inputImage = dcmImageToNRRD(args.inputDICOMImageDir, tempDir)
else:
scriptlogger.info(
"Using dcm2niix for DICOM image volume reconstruction.")
inputImage = dcmImageToNIfTI(args.inputDICOMImageDir, tempDir)
# convert segmentation into segments
inputSegments = dcmSEGToNRRDs(args.inputSEG, tempDir)
if len(inputSegments) == 0:
scriptlogger.error("No segments found. Cannot compute features.")
return -1
featuresDir = os.path.join(tempDir, 'Features')
if not os.path.isdir(featuresDir):
os.mkdir(featuresDir)
# initialize Metadata for the individual features
# TODO: to be replaced with direct mapping in the pyradiomics feature functions
# see https://github.com/Radiomics/pyradiomics/issues/435
if args.featuresDict is not None:
featuresDictPath = args.featuresDict
else:
featuresDictPath = "featuresDict.tsv"
if not os.path.exists(featuresDictPath):
scriptlogger.error(
"Features dictionary file %s is not found!",
featuresDictPath)
return -1
m = TID1500Metadata(featuresDictPath)
# find a valid DICOM file in the input image DICOM directory
dicomImage = None
for f in os.listdir(args.inputDICOMImageDir):
try:
pydicom.read_file(os.path.join(args.inputDICOMImageDir, f))
dicomImage = os.path.join(args.inputDICOMImageDir, f)
break
except BaseException:
continue
if dicomImage is None:
scriptlogger.error(
"Input DICOM image directory does not seem to contain any valid DICOM files!")
return -1
imageMetadataAccessor = DICOMMetadataAccessor(
os.path.join(args.inputDICOMImageDir, f))
segmentationMetadataAccessor = SEGMetadataAccessor(args.inputSEG)
pyradiomicsVersion = None
for inputSegment in inputSegments:
scriptlogger.debug("Processing segmentation file %s", inputSegment)
segmentNumber = os.path.split(inputSegment)[-1].split('.')[0]
try:
scriptlogger.debug("Initializing extractor")
# TODO: most likely, there will be geometric inconsistencies that will throw
# pyradiomics off by exceeding the default tolerance. Need to decide if we
# should always resample, or set tolerance to a larger number, or expose in
# the command line.
correctMaskSetting = {}
correctMaskSetting["setting"] = {
"geometryTolerance": 0.001, "correctMask": True}
params = []
if args.parameters is not None:
params = [args.parameters]
extractor = featureextractor.RadiomicsFeatureExtractor(*params, **correctMaskSetting)
except Exception:
scriptlogger.error(
                'Initialization of the pyradiomics feature extraction failed.', exc_info=True)
return -1
featureVector = extractor.execute(
inputImage, inputSegment, int(segmentNumber))
if len(featureVector) == 0:
scriptlogger.error("No features extracted!")
return -1
featuresFileName = os.path.join(featuresDir, segmentNumber + '.csv')
scriptlogger.debug("Will save features as %s", featuresFileName)
writer = csv.writer(open(featuresFileName, 'w'), lineterminator='\n')
headers = list(featureVector.keys())
writer.writerow(headers)
row = []
for h in headers:
row.append(featureVector.get(h, ""))
writer.writerow(row)
scriptlogger.debug("Initializing TID 1500 Measurement groups.")
m.addMeasurementGroup()
includedFeatureVectorItems = 0
for featureName in featureVector.keys():
if featureName == 'diagnostics_Versions_PyRadiomics':
pyradiomicsVersion = featureVector[featureName]
continue
featureValue = featureVector[featureName]
featureNameSplit = featureName.split('_')
if len(featureNameSplit) < 3:
scriptlogger.warning(
"Skipping unrecognized feature %s",
featureName)
continue
includedFeatureVectorItems += 1
m.addMeasurement(featureValue, featureName)
scriptlogger.debug(
"%d of %d total features included in the TID 1500 Measurement group.",
            includedFeatureVectorItems, len(featureVector))
# initialize metadata common to all measurements
scriptlogger.debug("Populating common metadata")
m.m["Measurements"][-1]["SourceSeriesForImageSegmentation"] = imageMetadataAccessor.getSeriesInstanceUID()
m.m["Measurements"][-1]["segmentationSOPInstanceUID"] = segmentationMetadataAccessor.getInstanceUID()
# TODO: populate those from SEG SegmentationType / AnatomicLocation
segmentationType = segmentationMetadataAccessor.getSegmentSegmentationTypeCode(
int(segmentNumber) - 1)
if segmentationType:
m.m["Measurements"][-1]["Finding"] = CodedValue(segmentationType.CodeValue,
segmentationType.CodingSchemeDesignator,
segmentationType.CodeMeaning).getDict()
segTrackingIdentifier = segmentationMetadataAccessor.getTrackingIdentifier(int(segmentNumber)-1)
segTrackingUniqueIdentifier = segmentationMetadataAccessor.getTrackingUniqueIdentifier(int(segmentNumber)-1)
if segTrackingIdentifier:
m.m["Measurements"][-1]["TrackingIdentifier"] = segTrackingIdentifier
else:
m.m["Measurements"][-1]["TrackingIdentifier"] = segmentationType.CodeMeaning
segmentDescription = segmentationMetadataAccessor.getSegmentDescription(int(segmentNumber)-1)
# SegmentDescription is Type 3, and can be missing
if segmentDescription is not None:
m.m["Measurements"][-1]["TrackingIdentifier"] = segmentationType.CodeMeaning+" - "+segmentDescription
if segTrackingUniqueIdentifier:
m.m["Measurements"][-1]["TrackingUniqueIdentifier"] = segTrackingUniqueIdentifier
segmentationLocation = segmentationMetadataAccessor.getSegmentAnatomicLocationCode(
int(segmentNumber) - 1)
if segmentationLocation:
m.m["Measurements"][-1]["FindingSite"] = CodedValue(segmentationLocation.CodeValue,
segmentationLocation.CodingSchemeDesignator,
segmentationLocation.CodeMeaning).getDict()
# AlgorithmIdentification
m.m["Measurements"][-1]["measurementAlgorithmIdentification"] = {}
m.m["Measurements"][-1]["measurementAlgorithmIdentification"]["AlgorithmName"] = "https://github.com/Radiomics/pyradiomics"
m.m["Measurements"][-1]["measurementAlgorithmIdentification"]["AlgorithmVersion"] = pyradiomicsVersion
m.m["Measurements"][-1]["measurementAlgorithmIdentification"]["AlgorithmParameters"] = [json.dumps(extractor.settings)]
m.m["observerContext"] = {}
m.m["observerContext"]["ObserverType"] = "DEVICE"
m.m["observerContext"]["DeviceObserverName"] = "pyradiomics"
m.m["observerContext"]["DeviceObserverModelName"] = pyradiomicsVersion
m.m["compositeContext"] = [os.path.split(args.inputSEG)[-1]]
m.m["imageLibrary"] = [os.path.split(f)[-1]
for f in os.listdir(args.inputDICOMImageDir)]
m.m["SeriesDescription"] = segmentationMetadataAccessor.getSeriesDescription() + ' - pyradiomics features'
scriptlogger.debug("Saving temporary files for DICOM SR writer.")
dcmqiMetadataFile = os.path.join(featuresDir, "dcmqi_sr.json")
outputSRTempFile = os.path.join(featuresDir, "sr.dcm")
m.saveJSONToFile(dcmqiMetadataFile)
scriptlogger.debug("Generating DICOM SR.")
writeSR(
args.inputSEG,
dcmqiMetadataFile,
args.inputDICOMImageDir,
outputSRTempFile)
# copy to the dest directory under UID as a name
try:
dcm = pydicom.read_file(outputSRTempFile)
shutil.move(
outputSRTempFile,
os.path.join(args.outputDir, dcm.SOPInstanceUID + ".dcm"))
except BaseException:
scriptlogger.error("Failed to move output SR!")
if __name__ == "__main__":
exeFound = {}
for exe in ['tid1500writer', 'dcm2niix', 'plastimatch', 'segimage2itkimage']:
if distutils.spawn.find_executable(exe) is None:
exeFound[exe] = False
else:
exeFound[exe] = True
if not (exeFound['tid1500writer'] and exeFound['segimage2itkimage']) or not (
exeFound['plastimatch'] or exeFound['dcm2niix']):
scriptlogger.error(
"Dependency converter(s) not found in the path.")
scriptlogger.error(
"dcmqi (https://github.com/qiicr/dcmqi), and dcm2niix (https://github.com/rordenlab/dcm2niix/releases)")
scriptlogger.error("or Plastimatch (http://plastimatch.org/)")
scriptlogger.error(
"need to be installed and available in the PATH for using this converter script.")
sys.exit()
main()
| bsd-3-clause |
benschneider/sideprojects1 | Shotnoise-Calibration/old/SNfit.py | 1 | 13317 | # -*- coding: utf-8 -*-
'''
@author: Ben Schneider
A script is used to readout mtx measurement data which also contains
a shotnoise responses.
Then fits them for G and Tn
'''
import numpy as np
from parsers import savemtx, loadmtx, make_header
from scipy.optimize import curve_fit # , leastsq
# import scipy.optimize
from scipy.constants import Boltzmann as Kb
from scipy.constants import h, e # , pi
from scipy.ndimage.filters import gaussian_filter1d
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit # , Parameter
def xderiv(d2MAT, dx=1.0, axis=0):
'''
This derivative is inaccurate at the edges.
    Calculates a 3-point derivative of a 1D or 2D matrix.
This does not require you to shift the xaxis by one half pt.
dx = distance between points
'''
if len(d2MAT.shape) > 1:
if axis == 1:
''' Not tested yet could be faster than a matrix transpose'''
a2 = np.zeros([d2MAT.shape[0]+2, d2MAT.shape[1]])
a2[1:-1, :] = d2MAT
m1 = d2MAT - a2[:-2, :]
m2 = a2[2:, :] - d2MAT
dy = (m1+m2)/2.0
dy[0, :] = dy[1, :]
dy[-1, :] = dy[-2, :]
elif axis == 0:
a2 = np.zeros([d2MAT.shape[0], d2MAT.shape[1]+2])
a2[:, 1:-1] = d2MAT
m1 = d2MAT - a2[:, :-2]
m2 = a2[:, 2:] - d2MAT
dy = (m1+m2)/2.0
dy[:, 0] = dy[:, 1]
dy[:, -1] = dy[:, -2]
return dy/dx
else:
a2 = np.zeros([d2MAT.shape[0]+2])
a2[1:-1] = d2MAT
m1 = d2MAT - a2[:-2]
m2 = a2[2:] - d2MAT
dy = (m1+m2)/2.0
dy[0] = dy[1]
dy[-1] = dy[-2]
return dy/dx
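# Minimal usage sketch for xderiv (the numbers are made up for illustration;
# for a 1D input the whole array is differentiated with point spacing dx):
#
#   y = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
#   dy = xderiv(y, dx=1.0)  # 3-point derivative, same length as y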
def find_nearest(someArray, value):
''' This function helps to find the index corresponding to a value
in an array.
Usage: indexZero = find_nearest(myarray, 0.0)
returns: abs(myarray-value).argmin()
'''
idx = abs(someArray-value).argmin()
return idx
def fitfunc(G, Tn, T, vc):
# def fitfunc(x, G, Tn, T, c):
'''
This contains the fitting equation, which i use to fit the
shot noise response.
returns: fit-value(x, ...)
'''
E1 = (e*vc.I*vc.dRm+h*vc.f)/(2*Kb*T)
E2 = (e*vc.I*vc.dRm-h*vc.f)/(2*Kb*T)
Si = ((2*Kb*T/vc.dRm) * (E1/np.tanh(E1) + E2/np.tanh(E2)))
return (vc.B*G*(Si * vc.dRm**2 +
4.0*Kb*T*vc.dRm + 4.0*Kb*Tn*vc.Z0 *
(vc.dRm**2+vc.Zopt*vc.Zopt)/(vc.Z0*vc.Z0+vc.Zopt*vc.Zopt)) *
(vc.Z0/((vc.dRm+vc.Z0)*(vc.dRm+vc.Z0))))
def fitfun2(params, vc):
'''
req: params with G, Tn, T; and vc as variable carrier
return: fitting value or array
'''
G = params['G'].value
Tn = params['Tn'].value
T = params['T'].value
return fitfunc(G, Tn, T, vc)
def ministuff(params, vc, measd):
'''
req: params with G, Tn, T
I (current array or value) (Amps)
dRm (Resistance for this current)
measured data (value or array)
crop values for example create with:
crop_within = find_nearest(I, -0.9e-6), find_nearest(I, 1.1e-6)
crop_outside = find_nearest(I, -19.5e-6), find_nearest(I, 19.5e-6)
crop = [crop_within, crop_outside]
This crop is used to cut data corresponding to the current values
i.e. to cut away the critical current part (from to (crop within))
also edges where the derivative and filter is inaccurate (crop outside)
returns: residuals*1e10;
(difference between measured and fitted data after it has been croped)
'''
SNfit = fitfun2(params, vc)
SNfit[vc.crop[0][0]:vc.crop[0][1]] = 0
measd[vc.crop[0][0]:vc.crop[0][1]] = 0
SNfit[0:(vc.crop[1][0])] = 0
SNfit[vc.crop[1][1]:-1] = 0
measd[0:(vc.crop[1][0])] = 0
measd[vc.crop[1][1]:-1] = 0
return (measd-SNfit)*1e10
class SN_class():
'''
This is simply an empty class which i am going to use
to store the shot-noise fitting results
'''
def __init__(self):
''' Using empty lists at which i can append incoming data'''
self.G1del = []
self.G2del = []
self.Tn1del = []
self.Tn2del = []
self.G1 = []
self.G2 = []
self.Tn1 = []
self.Tn2 = []
class my_variables_class():
''' used to store and pass lots of variables and locations
simply create with vc = my_variable_class()
it currently has the following default settings in __init__:
self.Z0 = 50.0
self.Zopt = 50.0
self.B = 1e5
self.f1 = 4.1e9
self.f2 = 4.8e9
self.RTR = 1009.1 * 1e3 # Ib Resistance in Ohm
self.RG = 1000.0 # Pre Amp gain factor
self.filein1 = 'S1_949_G0mV_SN_PCovMat_cI1I1.mtx'
self.filein2 = 'S1_949_G0mV_SN_PCovMat_cQ1Q1.mtx'
self.filein3 = 'S1_949_G0mV_SN_PCovMat_cI2I2.mtx'
self.filein4 = 'S1_949_G0mV_SN_PCovMat_cQ2Q2.mtx'
self.filein5 = 'S1_949_G0mV_SN_PV'
self.fifolder = 'sn_data//'
'''
def __init__(self):
self.LP = 3 # Gaus-Filter i.e. Low-Pass Vm derivative
self.Z0 = 50.0
self.Zopt = 50.0
self.B = 1e5
self.f1 = 4.1e9
self.f2 = 4.8e9
self.RTR = 1009.1 * 1e3 # Ib Resistance in Ohm
self.RG = 1000.0 # Pre Amp gain factor
self.filein1 = 'S1_949_G0mV_SN_PCovMat_cI1I1.mtx'
self.filein2 = 'S1_949_G0mV_SN_PCovMat_cQ1Q1.mtx'
self.filein3 = 'S1_949_G0mV_SN_PCovMat_cI2I2.mtx'
self.filein4 = 'S1_949_G0mV_SN_PCovMat_cQ2Q2.mtx'
self.filein5 = 'S1_949_G0mV_SN_PV.mtx'
# cross correlation files
self.filein6 = 'S1_949_G0mV_SN_PCovMat_cI1I2.mtx'
self.filein7 = 'S1_949_G0mV_SN_PCovMat_cI1Q2.mtx'
self.filein8 = 'S1_949_G0mV_SN_PCovMat_cQ1I2.mtx'
self.filein9 = 'S1_949_G0mV_SN_PCovMat_cQ1Q2.mtx'
self.fifolder = 'sn_data//'
def load_and_go(self):
'''
simply executes the sub definitions
loads data, normalizes to SI units, calculates differential resistances
'''
self.loaddata()
self.loadCcor()
self.norm_to_SI()
self.calc_diff_resistance()
def loaddata(self):
'''
Loads the data defined in self.filein1 .. 5
'''
(self.I1I1, d3,
self.d2, self.d1, self.dz) = loadmtx(self.fifolder + self.filein1)
self.Q1Q1, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein2)
self.I2I2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein3)
self.Q2Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein4)
self.Vm, self.d3, dv2, dv1, dvz = loadmtx(self.fifolder + self.filein5)
def loadCcor(self):
'''
want to simply load the amplitude at max correlation position
i.e. at lags = 0
'''
I1I2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein6)
I1Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein7)
Q1I2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein8)
Q1Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein9)
I1Q1, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein7)
I2Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein7)
lags0 = find_nearest(d1.lin, 0.0) # lags position
self.cI1I2 = I1I2[lags0]
self.cI1Q2 = I1Q2[lags0]
self.cQ1I2 = Q1I2[lags0]
self.cQ1Q2 = Q1Q2[lags0]
self.cI1Q1 = I1Q1[lags0]
self.cI2Q2 = I2Q2[lags0]
self.cPD1 = (self.I1I1[lags0]+self.Q1Q1[lags0])
self.cPD2 = (self.I2I2[lags0]+self.Q2Q2[lags0])
def norm_to_SI(self):
'''
Take amplifier gains and resistances as defined by self.RTR and self.RG
to scale voltage units to [Volt] and [Amps]
'''
self.d3.scale = 1.0/(self.RTR) # scale X-axis to Amps
self.d3.update_lin()
self.I = self.d3.lin
self.Vm = self.Vm/self.RG # scale Vm-data to Volts
def calc_diff_resistance(self):
'''
calculates the differential resistance of all traces in one go
'''
self.d3step = self.d3.lin[1] - self.d3.lin[0] # get step-size
self.dIV = xderiv(self.Vm[0], self.d3step)
self.dIVlp = gaussian_filter1d(abs(self.dIV), self.LP) # Gausfilter
def getSNfits(vc):
'''
Loading the data files I1I1, Q1Q1, I2I2, Q2Q2, Vm
d1, d2, d3 are all the same since they all originate from the same type of
measurement.
before running it needs details of files and parameters to use.
Those are made by creating a variables_class;
example:
vc = my_variables_class()
which contains the following default settings
vc.Z0 = 50.0
vc.Zopt = 50.0
vc.B = 1e5
vc.f1 = 4.1e9
vc.f2 = 4.8e9
vc.RTR = 1009.1 * 1e3 # Ib Resistance in Ohms
vc.RG = 1000.0 # Pre Amp gain factor
additionally the filenames need to be defined in there:
simply give the base filenames as:
vc.filein1 = 'S1_949_G0mV_SN_PCovMat_cI1I1.mtx'
vc.filein2 = 'S1_949_G0mV_SN_PCovMat_cQ1Q1.mtx'
vc.filein3 = 'S1_949_G0mV_SN_PCovMat_cI2I2.mtx'
vc.filein4 = 'S1_949_G0mV_SN_PCovMat_cQ2Q2.mtx'
vc.filein5 = 'S1_949_G0mV_SN_PV.mtx'
and of course the folder where to find these files
vc.fifolder = 'sn_data//'
Right now this def getSNfits does too many things for a single definition:
- loads the defined mtx files into the vc class
'''
SNr = SN_class()
vc.load_and_go()
plt.ion()
# create crop vector for the fitting
crop_within = find_nearest(vc.I, -1.55e-6), find_nearest(vc.I, 1.55e-6)
crop_outside = find_nearest(vc.I, -19e-6), find_nearest(vc.I, 19e-6)
vc.crop = [crop_within, crop_outside]
# create fitting parameters
params = Parameters()
params.add('Tn', value=3.7, vary=True) # , min=2.2, max=5)
params.add('G', value=3.38e7, vary=True) # , min=1e6, max=1e9)
params.add('T', value=0.012, vary=False, min=0.01, max=0.5)
data1 = vc.cPD1*1.0
data2 = vc.cPD2*1.0
for pidx in range(len(data1)):
'''
scales Voltage_trace[selected power] to Volts
obtains differential Resistance Rm
fits selected data set
records corresponding fit results into SN_r class values
'''
vc.dRm = vc.dIVlp[pidx] # select dRm which is wanted
vc.f = vc.f1
result = minimize(ministuff, params, args=(vc, data1[pidx]*1.0))
print report_fit(result)
SNr.G1del.append(result.params['G'].stderr)
SNr.Tn1del.append(result.params['Tn'].stderr)
SNr.G1.append(result.params['G'].value)
SNr.Tn1.append(result.params['Tn'].value)
SNfit1 = fitfun2(result.params, vc)
Pn1 = (result.params['G'].value*vc.B *
               (Kb*(result.params['Tn'].value+result.params['T'].value)+0.5*h*vc.f1))
Pn1array = np.ones(len(vc.I))*Pn1
plt.figure()
title = ('D1, RF-Drive: ' + str(vc.d2.lin[pidx]))
plt.plot(vc.I, data1[pidx]*1e9)
plt.hold(True)
plt.plot(vc.I, SNfit1*1e9)
plt.plot(vc.I, Pn1array*1e9)
plt.title(title)
plt.hold(False)
plt.show()
vc.f = vc.f2
result = minimize(ministuff, params, args=(vc, data2[pidx]*1.0))
print report_fit(result)
SNr.G2del.append(result.params['G'].stderr)
SNr.Tn2del.append(result.params['Tn'].stderr)
SNr.G2.append(result.params['G'].value)
SNr.Tn2.append(result.params['Tn'].value)
SNfit2 = fitfun2(result.params, vc)
Pn2 = (result.params['G'].value*vc.B *
               (Kb*(result.params['Tn'].value+result.params['T'].value)+0.5*h*vc.f2))
Pn2array = np.ones(len(vc.I))*Pn2
plt.figure()
title = ('D2, RF-Drive: ' + str(vc.d2.lin[pidx]))
plt.plot(vc.I, data2[pidx]*1e9)
plt.hold(True)
plt.plot(vc.I, SNfit2*1e9)
plt.plot(vc.I, Pn2array*1e9)
plt.title(title)
plt.hold(False)
plt.show()
# lists to array
SNr.G1 = np.array(SNr.G1)
SNr.G2 = np.array(SNr.G2)
SNr.Tn1 = np.array(SNr.Tn1)
SNr.Tn2 = np.array(SNr.Tn2)
SNr.G1del = np.array(SNr.G1del)
SNr.G2del = np.array(SNr.G2del)
SNr.Tn1del = np.array(SNr.Tn1del)
SNr.Tn2del = np.array(SNr.Tn2del)
# Photon numbers hemt input
SNr.Pi1 = (Kb*SNr.Tn1)/(h*vc.f1) + 0.5
SNr.Pi1del = (Kb*SNr.Tn1del)/(h*vc.f1)
SNr.Pi2 = (Kb*SNr.Tn2)/(h*vc.f2) + 0.5
SNr.Pi2del = (Kb*SNr.Tn2del)/(h*vc.f2)
# Noise power at output at I = 0
SNr.Pn1 = SNr.G1 * vc.B * SNr.Pi1 * (h * vc.f1)
SNr.Pn1del = (SNr.Pn1 * np.sqrt((SNr.G1del/SNr.G1)**2 +
(SNr.Tn1del/SNr.Tn1)**2))
SNr.Pn2 = SNr.G2 * vc.B * SNr.Pi2 * (h * vc.f2)
SNr.Pn2del = (SNr.Pn2 * np.sqrt((SNr.G2del/SNr.G2)**2 +
(SNr.Tn2del/SNr.Tn2)**2))
print 'Photons in1', SNr.Pi1.mean(), '+/-', SNr.Pi1del.mean()
print 'Photons in2', SNr.Pi2.mean(), '+/-', SNr.Pi2del.mean()
print 'Pn1', SNr.Pn1.mean(), '+/-', SNr.Pn1del.mean()
print 'Pn2', SNr.Pn2.mean(), '+/-', SNr.Pn2del.mean()
return SNr
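# Usage sketch (commented out; the folder and file names are the defaults
# listed in the my_variables_class docstring and may need to be adapted to
# the actual data set):
#
#   vc = my_variables_class()
#   vc.fifolder = 'sn_data//'
#   SNr = getSNfits(vc)
#   print 'G1:', SNr.G1.mean(), 'Tn1:', SNr.Tn1.mean()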
| gpl-2.0 |
fanshi118/Time-Out-NY-with-ML-Revisited | ml_models/lr_train.py | 1 | 4647 | # http://rnowling.github.io/data/science/2016/10/20/lr-hashing-recsys.html
# https://github.com/rnowling/rec-sys-experiments
import numpy as np, scipy.sparse as sp
import random, sys
from sklearn.feature_extraction import FeatureHasher
from sklearn.linear_model import SGDClassifier
# from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix
from sklearn.externals import joblib
import data_helper
import matplotlib.pyplot as plt
plt.style.use("fivethirtyeight")
# paths to data
_tr = "../data/train_data.csv"
_vv = "../data/valid_visible.csv"
_vp = "../data/valid_predict.csv"
def generate_interaction(_tr, _vv, _vp):
print "Creating user venue-interaction lists"
_, all_venues = data_helper.get_unique(_tr, users=False, venues=True)
train_pairs, valid_pairs = data_helper.get_user_venue_pairs(_tr, _vv, _vp)
return all_venues, train_pairs, valid_pairs
def generate_features(all_venues, yay_venues):
yay_pairs, nay_pairs = [], []
# positive examples
for venue1 in yay_venues:
venue_pairs = dict()
for venue2 in yay_venues:
# skip itself to avoid overfitting
if venue1 != venue2:
venue_pairs["%s_%s" % (venue1, venue2)] = 1.
yay_pairs.append(venue_pairs)
# negative examples
nay_venues = all_venues - yay_venues
for venue1 in random.sample(nay_venues, len(yay_venues)):
venue_pairs = dict()
for venue2 in yay_venues:
venue_pairs["%s_%s" % (venue1, venue2)] = 1.
nay_pairs.append(venue_pairs)
labels = np.hstack([np.ones(len(yay_pairs)), np.zeros(len(nay_pairs))])
return labels, yay_pairs, nay_pairs
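# Sketch of how the pair dictionaries built above are hashed into a fixed-width
# sparse matrix (the venue ids are hypothetical; the hasher mirrors the one set
# up in train_and_score below):
#
#   hasher = FeatureHasher(n_features=2**20)
#   pairs = [{"venueA_venueB": 1., "venueA_venueC": 1.}]
#   X = hasher.transform(pairs)  # scipy.sparse matrix of shape (1, 2**20)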
def train_and_score(_tr, _vv, _vp, model_sizes, colors=None):
all_venues, train_pairs, valid_pairs = generate_interaction(_tr, _vv, _vp)
print "Creating models"
plt.figure(figsize=(10,10)); lw = 2
roc_aucs = []
for size, color in zip(model_sizes, colors):
extractor = FeatureHasher(n_features=2**size)
model = SGDClassifier(loss="log", penalty="l2", alpha=0.001, n_jobs=-1)
# model = BernoulliNB()
print "Training"
for i, (user, yay_venues) in enumerate(train_pairs.iteritems()):
print "Training on user", i, user
labels, yay_pairs, nay_pairs = generate_features(all_venues, yay_venues)
yay_features, nay_features = extractor.transform(yay_pairs), extractor.transform(nay_pairs)
features = sp.vstack([yay_features, nay_features])
model.partial_fit(features, labels, classes=[0, 1])
print "Testing"
all_labels, all_preds, all_probas = [], [], []
for i, (user, yay_venues) in enumerate(valid_pairs.iteritems()):
print "Testing on user", i, user
labels, yay_pairs, nay_pairs = generate_features(all_venues, yay_venues)
all_labels.extend(labels)
yay_features, nay_features = extractor.transform(yay_pairs), extractor.transform(nay_pairs)
features = sp.vstack([yay_features, nay_features])
preds, probas = model.predict(features), model.predict_proba(features)
all_preds.extend(preds), all_probas.extend(probas[:, 1])
print "Scoring"
roc_auc = roc_auc_score(all_labels, all_probas)
cm = confusion_matrix(all_labels, all_preds)
print "Model size", size, "AUC", roc_auc
print cm
roc_aucs.append(roc_auc)
fpr, tpr, _ = roc_curve(all_labels, all_probas)
plt.plot(fpr, tpr, color=color,
lw=lw, label='Model %d (area = %0.2f)' % (size, roc_auc))
joblib.dump(model, 'model_logit_size%d.pkl' % size)
np.save("labels_logit_size%d.npy" % size, all_labels)
np.save("probas_logit_size%d.npy" % size, all_probas)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, ls='--', label='Luck')
plt.xlim([-.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for different model sizes')
plt.legend(loc="lower right")
# plt.savefig('../plots/model_nb.png')
plt.tight_layout()
plt.show()
'''
plt.figure(figsize=(15,9))
plt.plot(model_sizes, roc_aucs, c='gray', ls='dashed', lw=1)
for model_size, roc_auc in zip(model_sizes, roc_aucs):
plt.plot(model_size, roc_auc, "*", markersize=12)
plt.xlim([model_sizes[0]-.1, model_sizes[-1]+.1])
plt.ylim([0.5, 0.85])
plt.xlabel("Model Size")
plt.ylabel("ROC AUC Score")
plt.title('ROC AUC score for different model sizes')
plt.savefig('../plots/auc_by_model_size_nb.png')
plt.tight_layout()
plt.show()
'''
def main():
# model size by number of bits
# model_size = int(sys.argv[1])
# model_sizes = range(10, 22)
model_sizes = [20]
colors = ['forestgreen']
# colors = ['darkorange', 'skyblue', 'forestgreen', 'darkslategray', 'firebrick']
train_and_score(_tr, _vv, _vp, model_sizes, colors)
if __name__=="__main__":
main() | mit |
dacoex/pvlib-python | pvlib/solarposition.py | 2 | 24136 | """
Calculate the solar position using a variety of methods/packages.
"""
# Contributors:
# Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Will Holmgren (@wholmgren), University of Arizona, 2014
# Tony Lorenzo (@alorenzo175), University of Arizona, 2015
from __future__ import division
import os
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime as dt
try:
from importlib import reload
except ImportError:
try:
from imp import reload
except ImportError:
pass
import numpy as np
import pandas as pd
from pvlib.tools import localize_to_utc, datetime_to_djd, djd_to_datetime
def get_solarposition(time, location, method='nrel_numpy', pressure=101325,
temperature=12, **kwargs):
"""
A convenience wrapper for the solar position calculators.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
method : string
'pyephem' uses the PyEphem package: :func:`pyephem`
'nrel_c' uses the NREL SPA C code [3]: :func:`spa_c`
'nrel_numpy' uses an implementation of the NREL SPA algorithm
described in [1] (default): :func:`spa_python`
'nrel_numba' uses an implementation of the NREL SPA algorithm
described in [1], but also compiles the code first: :func:`spa_python`
'ephemeris' uses the pvlib ephemeris code: :func:`ephemeris`
pressure : float
Pascals.
temperature : float
Degrees C.
Other keywords are passed to the underlying solar position function.
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar radiation
applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] NREL SPA code: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
"""
method = method.lower()
if isinstance(time, dt.datetime):
time = pd.DatetimeIndex([time, ])
if method == 'nrel_c':
ephem_df = spa_c(time, location, pressure, temperature, **kwargs)
elif method == 'nrel_numba':
ephem_df = spa_python(time, location, pressure, temperature,
how='numba', **kwargs)
elif method == 'nrel_numpy':
ephem_df = spa_python(time, location, pressure, temperature,
how='numpy', **kwargs)
elif method == 'pyephem':
ephem_df = pyephem(time, location, pressure, temperature, **kwargs)
elif method == 'ephemeris':
ephem_df = ephemeris(time, location, pressure, temperature, **kwargs)
else:
raise ValueError('Invalid solar position method')
return ephem_df
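# Usage sketch (not executed on import; the coordinates are an arbitrary
# example and Location is assumed to be constructed as
# pvlib.location.Location(latitude, longitude, tz, altitude)):
#
#   import pandas as pd
#   from pvlib.location import Location
#   times = pd.date_range('2015-06-01', periods=24, freq='1H', tz='US/Arizona')
#   tus = Location(32.2, -111.0, 'US/Arizona', 700)
#   solpos = get_solarposition(times, tus, method='nrel_numpy')
#   solpos[['apparent_zenith', 'azimuth']].head()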
def spa_c(time, location, pressure=101325, temperature=12, delta_t=67.0,
raw_spa_output=False):
"""
Calculate the solar position using the C implementation of the NREL
SPA code
The source files for this code are located in './spa_c_files/', along with
a README file which describes how the C code is wrapped in Python.
    Due to license restrictions, the C code must be downloaded separately
    and used in accordance with its license.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
pressure : float
Pressure in Pascals
temperature : float
Temperature in C
delta_t : float
Difference between terrestrial time and UT1.
USNO has previous values and predictions.
raw_spa_output : bool
If true, returns the raw SPA output.
Returns
-------
DataFrame
The DataFrame will have the following columns:
elevation,
azimuth,
zenith,
apparent_elevation,
apparent_zenith.
References
----------
NREL SPA code: http://rredc.nrel.gov/solar/codesandalgorithms/spa/
USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_python, ephemeris
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Edited by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
try:
from pvlib.spa_c_files.spa_py import spa_calc
except ImportError:
raise ImportError('Could not import built-in SPA calculator. ' +
'You may need to recompile the SPA code.')
pvl_logger.debug('using built-in spa code to calculate solar position')
time_utc = localize_to_utc(time, location)
spa_out = []
for date in time_utc:
spa_out.append(spa_calc(year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second,
timezone=0, # tz corrections handled above
latitude=location.latitude,
longitude=location.longitude,
elevation=location.altitude,
pressure=pressure / 100,
temperature=temperature,
delta_t=delta_t
))
spa_df = pd.DataFrame(spa_out, index=time_utc).tz_convert(location.tz)
if raw_spa_output:
return spa_df
else:
dfout = pd.DataFrame({'azimuth': spa_df['azimuth'],
'apparent_zenith': spa_df['zenith'],
'apparent_elevation': spa_df['e'],
'elevation': spa_df['e0'],
'zenith': 90 - spa_df['e0']})
return dfout
def _spa_python_import(how):
"""Compile spa.py appropriately"""
from pvlib import spa
# check to see if the spa module was compiled with numba
using_numba = spa.USE_NUMBA
if how == 'numpy' and using_numba:
# the spa module was compiled to numba code, so we need to
# reload the module without compiling
# the PVLIB_USE_NUMBA env variable is used to tell the module
# to not compile with numba
os.environ['PVLIB_USE_NUMBA'] = '0'
pvl_logger.debug('Reloading spa module without compiling')
spa = reload(spa)
del os.environ['PVLIB_USE_NUMBA']
elif how == 'numba' and not using_numba:
# The spa module was not compiled to numba code, so set
# PVLIB_USE_NUMBA so it does compile to numba on reload.
os.environ['PVLIB_USE_NUMBA'] = '1'
pvl_logger.debug('Reloading spa module, compiling with numba')
spa = reload(spa)
del os.environ['PVLIB_USE_NUMBA']
elif how != 'numba' and how != 'numpy':
raise ValueError("how must be either 'numba' or 'numpy'")
return spa
def spa_python(time, location, pressure=101325, temperature=12, delta_t=None,
atmos_refract=None, how='numpy', numthreads=4):
"""
Calculate the solar position using a python implementation of the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
pressure : int or float, optional
avg. yearly air pressure in Pascals.
temperature : int or float, optional
avg. yearly air temperature in degrees C.
delta_t : float, optional
Difference between terrestrial time and UT1.
The USNO has historical and forecasted delta_t [3].
    atmos_refract : float, optional
The approximate atmospheric refraction (in degrees)
at sunrise and sunset.
how : str, optional
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_zenith (degrees),
zenith (degrees),
apparent_elevation (degrees),
elevation (degrees),
azimuth (degrees),
equation_of_time (minutes).
References
----------
[1] I. Reda and A. Andreas, Solar position algorithm for solar
radiation applications. Solar Energy, vol. 76, no. 5, pp. 577-589, 2004.
[2] I. Reda and A. Andreas, Corrigendum to Solar position algorithm for
solar radiation applications. Solar Energy, vol. 81, no. 6, p. 838, 2007.
[3] USNO delta T: http://www.usno.navy.mil/USNO/earth-orientation/eo-products/long-term
See also
--------
pyephem, spa_c, ephemeris
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
pvl_logger.debug('Calculating solar position with spa_python code')
lat = location.latitude
lon = location.longitude
elev = location.altitude
pressure = pressure / 100 # pressure must be in millibars for calculation
delta_t = delta_t or 67.0
atmos_refract = atmos_refract or 0.5667
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (TypeError, ValueError):
time = pd.DatetimeIndex([time, ])
unixtime = localize_to_utc(time, location).astype(np.int64)/10**9
spa = _spa_python_import(how)
app_zenith, zenith, app_elevation, elevation, azimuth, eot = spa.solar_position(
unixtime, lat, lon, elev, pressure, temperature, delta_t,
atmos_refract, numthreads)
result = pd.DataFrame({'apparent_zenith': app_zenith, 'zenith': zenith,
'apparent_elevation': app_elevation,
'elevation': elevation, 'azimuth': azimuth,
'equation_of_time': eot},
index=time)
try:
result = result.tz_convert(location.tz)
except TypeError:
result = result.tz_localize(location.tz)
return result
def get_sun_rise_set_transit(time, location, how='numpy', delta_t=None,
numthreads=4):
"""
Calculate the sunrise, sunset, and sun transit times using the
NREL SPA algorithm described in [1].
If numba is installed, the functions can be compiled to
machine code and the function can be multithreaded.
Without numba, the function evaluates via numpy with
a slight performance hit.
Parameters
----------
time : pandas.DatetimeIndex
Only the date part is used
location : pvlib.Location object
delta_t : float, optional
Difference between terrestrial time and UT1.
By default, use USNO historical data and predictions
how : str, optional
Options are 'numpy' or 'numba'. If numba >= 0.17.0
is installed, how='numba' will compile the spa functions
to machine code and run them multithreaded.
numthreads : int, optional
Number of threads to use if how == 'numba'.
Returns
-------
DataFrame
The DataFrame will have the following columns:
sunrise, sunset, transit
References
----------
[1] Reda, I., Andreas, A., 2003. Solar position algorithm for solar
radiation applications. Technical report: NREL/TP-560- 34302. Golden,
USA, http://www.nrel.gov.
"""
# Added by Tony Lorenzo (@alorenzo175), University of Arizona, 2015
pvl_logger.debug('Calculating sunrise, set, transit with spa_python code')
lat = location.latitude
lon = location.longitude
delta_t = delta_t or 67.0
if not isinstance(time, pd.DatetimeIndex):
try:
time = pd.DatetimeIndex(time)
except (TypeError, ValueError):
time = pd.DatetimeIndex([time, ])
# must convert to midnight UTC on day of interest
utcday = pd.DatetimeIndex(time.date).tz_localize('UTC')
unixtime = utcday.astype(np.int64)/10**9
spa = _spa_python_import(how)
transit, sunrise, sunset = spa.transit_sunrise_sunset(
unixtime, lat, lon, delta_t, numthreads)
    # arrays are in seconds since epoch format, need to convert to timestamps
transit = pd.to_datetime(transit, unit='s', utc=True).tz_convert(
location.tz).tolist()
sunrise = pd.to_datetime(sunrise, unit='s', utc=True).tz_convert(
location.tz).tolist()
sunset = pd.to_datetime(sunset, unit='s', utc=True).tz_convert(
location.tz).tolist()
result = pd.DataFrame({'transit': transit,
'sunrise': sunrise,
'sunset': sunset}, index=time)
try:
result = result.tz_convert(location.tz)
except TypeError:
result = result.tz_localize(location.tz)
return result
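# Usage sketch (commented out; reuses the hypothetical Location construction
# shown after get_solarposition above):
#
#   days = pd.DatetimeIndex(['2015-06-01', '2015-06-02'], tz='US/Arizona')
#   rst = get_sun_rise_set_transit(days, tus)
#   rst[['sunrise', 'sunset', 'transit']]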
def _ephem_setup(location, pressure, temperature):
import ephem
# initialize a PyEphem observer
obs = ephem.Observer()
obs.lat = str(location.latitude)
obs.lon = str(location.longitude)
obs.elevation = location.altitude
obs.pressure = pressure / 100. # convert to mBar
obs.temp = temperature
# the PyEphem sun
sun = ephem.Sun()
return obs, sun
def pyephem(time, location, pressure=101325, temperature=12):
"""
Calculate the solar position using the PyEphem package.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location object
pressure : int or float, optional
air pressure in Pascals.
temperature : int or float, optional
air temperature in degrees C.
Returns
-------
DataFrame
The DataFrame will have the following columns:
apparent_elevation, elevation,
apparent_azimuth, azimuth,
apparent_zenith, zenith.
See also
--------
spa_python, spa_c, ephemeris
"""
# Written by Will Holmgren (@wholmgren), University of Arizona, 2014
try:
import ephem
except ImportError:
raise ImportError('PyEphem must be installed')
pvl_logger.debug('using PyEphem to calculate solar position')
time_utc = localize_to_utc(time, location)
sun_coords = pd.DataFrame(index=time_utc)
obs, sun = _ephem_setup(location, pressure, temperature)
# make and fill lists of the sun's altitude and azimuth
# this is the pressure and temperature corrected apparent alt/az.
alts = []
azis = []
for thetime in sun_coords.index:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['apparent_elevation'] = alts
sun_coords['apparent_azimuth'] = azis
# redo it for p=0 to get no atmosphere alt/az
obs.pressure = 0
alts = []
azis = []
for thetime in sun_coords.index:
obs.date = ephem.Date(thetime)
sun.compute(obs)
alts.append(sun.alt)
azis.append(sun.az)
sun_coords['elevation'] = alts
sun_coords['azimuth'] = azis
# convert to degrees. add zenith
sun_coords = np.rad2deg(sun_coords)
sun_coords['apparent_zenith'] = 90 - sun_coords['apparent_elevation']
sun_coords['zenith'] = 90 - sun_coords['elevation']
try:
return sun_coords.tz_convert(location.tz)
except TypeError:
return sun_coords.tz_localize(location.tz)
def ephemeris(time, location, pressure=101325, temperature=12):
"""
Python-native solar position calculator.
The accuracy of this code is not guaranteed.
Consider using the built-in spa_c code or the PyEphem library.
Parameters
----------
time : pandas.DatetimeIndex
location : pvlib.Location
pressure : float or Series
Ambient pressure (Pascals)
temperature : float or Series
Ambient temperature (C)
Returns
-------
DataFrame with the following columns:
* apparent_elevation : apparent sun elevation accounting for
atmospheric refraction.
* elevation : actual elevation (not accounting for refraction)
of the sun in decimal degrees, 0 = on horizon.
The complement of the zenith angle.
    * azimuth : Azimuth of the sun in decimal degrees East of North.
* apparent_zenith : apparent sun zenith accounting for atmospheric
refraction.
* zenith : Solar zenith angle
* solar_time : Solar time in decimal hours (solar noon is 12.00).
References
-----------
Grover Hughes' class and related class materials on Engineering
Astronomy at Sandia National Laboratories, 1985.
See also
--------
pyephem, spa_c, spa_python
"""
# Added by Rob Andrews (@Calama-Consulting), Calama Consulting, 2014
# Edited by Will Holmgren (@wholmgren), University of Arizona, 2014
# Most comments in this function are from PVLIB_MATLAB or from
# pvlib-python's attempt to understand and fix problems with the
# algorithm. The comments are *not* based on the reference material.
# This helps a little bit:
# http://www.cv.nrao.edu/~rfisher/Ephemerides/times.html
pvl_logger.debug('location=%s, temperature=%s, pressure=%s',
location, temperature, pressure)
# the inversion of longitude is due to the fact that this code was
# originally written for the convention that positive longitude were for
# locations west of the prime meridian. However, the correct convention (as
# of 2009) is to use negative longitudes for locations west of the prime
# meridian. Therefore, the user should input longitude values under the
# correct convention (e.g. Albuquerque is at -106 longitude), but it needs
# to be inverted for use in the code.
Latitude = location.latitude
Longitude = -1 * location.longitude
Abber = 20 / 3600.
LatR = np.radians(Latitude)
# the SPA algorithm needs time to be expressed in terms of
# decimal UTC hours of the day of the year.
# first convert to utc
time_utc = localize_to_utc(time, location)
# strip out the day of the year and calculate the decimal hour
DayOfYear = time_utc.dayofyear
DecHours = (time_utc.hour + time_utc.minute/60. + time_utc.second/3600. +
time_utc.microsecond/3600.e6)
UnivDate = DayOfYear
UnivHr = DecHours
Yr = time_utc.year - 1900
YrBegin = 365 * Yr + np.floor((Yr - 1) / 4.) - 0.5
Ezero = YrBegin + UnivDate
T = Ezero / 36525.
# Calculate Greenwich Mean Sidereal Time (GMST)
GMST0 = 6 / 24. + 38 / 1440. + (
45.836 + 8640184.542 * T + 0.0929 * T ** 2) / 86400.
GMST0 = 360 * (GMST0 - np.floor(GMST0))
GMSTi = np.mod(GMST0 + 360 * (1.0027379093 * UnivHr / 24.), 360)
# Local apparent sidereal time
LocAST = np.mod((360 + GMSTi - Longitude), 360)
EpochDate = Ezero + UnivHr / 24.
T1 = EpochDate / 36525.
ObliquityR = np.radians(
23.452294 - 0.0130125 * T1 - 1.64e-06 * T1 ** 2 + 5.03e-07 * T1 ** 3)
MlPerigee = 281.22083 + 4.70684e-05 * EpochDate + 0.000453 * T1 ** 2 + (
3e-06 * T1 ** 3)
MeanAnom = np.mod((358.47583 + 0.985600267 * EpochDate - 0.00015 *
T1 ** 2 - 3e-06 * T1 ** 3), 360)
Eccen = 0.01675104 - 4.18e-05 * T1 - 1.26e-07 * T1 ** 2
EccenAnom = MeanAnom
E = 0
while np.max(abs(EccenAnom - E)) > 0.0001:
E = EccenAnom
EccenAnom = MeanAnom + np.degrees(Eccen)*np.sin(np.radians(E))
TrueAnom = (
2 * np.mod(np.degrees(np.arctan2(((1 + Eccen) / (1 - Eccen)) ** 0.5 *
np.tan(np.radians(EccenAnom) / 2.), 1)), 360))
EcLon = np.mod(MlPerigee + TrueAnom, 360) - Abber
EcLonR = np.radians(EcLon)
DecR = np.arcsin(np.sin(ObliquityR)*np.sin(EcLonR))
RtAscen = np.degrees(np.arctan2(np.cos(ObliquityR)*np.sin(EcLonR),
np.cos(EcLonR)))
HrAngle = LocAST - RtAscen
HrAngleR = np.radians(HrAngle)
HrAngle = HrAngle - (360 * ((abs(HrAngle) > 180)))
SunAz = np.degrees(np.arctan2(-np.sin(HrAngleR),
np.cos(LatR)*np.tan(DecR) -
np.sin(LatR)*np.cos(HrAngleR)))
SunAz[SunAz < 0] += 360
SunEl = np.degrees(np.arcsin(
np.cos(LatR) * np.cos(DecR) * np.cos(HrAngleR) +
np.sin(LatR) * np.sin(DecR)))
SolarTime = (180 + HrAngle) / 15.
# Calculate refraction correction
Elevation = SunEl
TanEl = pd.Series(np.tan(np.radians(Elevation)), index=time_utc)
Refract = pd.Series(0, index=time_utc)
Refract[(Elevation > 5) & (Elevation <= 85)] = (
58.1/TanEl - 0.07/(TanEl**3) + 8.6e-05/(TanEl**5))
Refract[(Elevation > -0.575) & (Elevation <= 5)] = (
Elevation *
(-518.2 + Elevation*(103.4 + Elevation*(-12.79 + Elevation*0.711))) +
1735)
Refract[(Elevation > -1) & (Elevation <= -0.575)] = -20.774 / TanEl
Refract *= (283/(273. + temperature)) * (pressure/101325.) / 3600.
ApparentSunEl = SunEl + Refract
# make output DataFrame
DFOut = pd.DataFrame(index=time_utc).tz_convert(location.tz)
DFOut['apparent_elevation'] = ApparentSunEl
DFOut['elevation'] = SunEl
DFOut['azimuth'] = SunAz
DFOut['apparent_zenith'] = 90 - ApparentSunEl
DFOut['zenith'] = 90 - SunEl
DFOut['solar_time'] = SolarTime
return DFOut
def calc_time(lower_bound, upper_bound, location, attribute, value,
pressure=101325, temperature=12, xtol=1.0e-12):
"""
Calculate the time between lower_bound and upper_bound
where the attribute is equal to value. Uses PyEphem for
solar position calculations.
Parameters
----------
lower_bound : datetime.datetime
upper_bound : datetime.datetime
location : pvlib.Location object
attribute : str
The attribute of a pyephem.Sun object that
you want to solve for. Likely options are 'alt'
and 'az' (which must be given in radians).
value : int or float
The value of the attribute to solve for
pressure : int or float, optional
Air pressure in Pascals. Set to 0 for no
atmospheric correction.
temperature : int or float, optional
Air temperature in degrees C.
xtol : float, optional
The allowed error in the result from value
Returns
-------
datetime.datetime
Raises
------
ValueError
If the value is not contained between the bounds.
AttributeError
If the given attribute is not an attribute of a
PyEphem.Sun object.
"""
try:
import scipy.optimize as so
except ImportError:
raise ImportError('The calc_time function requires scipy')
obs, sun = _ephem_setup(location, pressure, temperature)
def compute_attr(thetime, target, attr):
obs.date = thetime
sun.compute(obs)
return getattr(sun, attr) - target
lb = datetime_to_djd(lower_bound)
ub = datetime_to_djd(upper_bound)
djd_root = so.brentq(compute_attr, lb, ub,
(value, attribute), xtol=xtol)
return djd_to_datetime(djd_root, location.tz)
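# Usage sketch (commented out; the bounds and location are illustrative
# assumptions, and solving for 'alt' equal to 0 radians approximates sunrise
# within the given window):
#
#   import datetime
#   from pvlib.location import Location
#   loc = Location(32.2, -111.0, 'US/Arizona', 700)
#   lower = datetime.datetime(2015, 6, 1, 4, 0)
#   upper = datetime.datetime(2015, 6, 1, 11, 0)
#   approx_sunrise = calc_time(lower, upper, loc, 'alt', 0.0)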
def pyephem_earthsun_distance(time):
"""
Calculates the distance from the earth to the sun using pyephem.
Parameters
----------
time : pd.DatetimeIndex
Returns
-------
pd.Series. Earth-sun distance in AU.
"""
pvl_logger.debug('solarposition.pyephem_earthsun_distance()')
import ephem
sun = ephem.Sun()
earthsun = []
for thetime in time:
sun.compute(ephem.Date(thetime))
earthsun.append(sun.earth_distance)
return pd.Series(earthsun, index=time)
| bsd-3-clause |
matplotlib/mpl-probscale | setup.py | 1 | 1593 | # Setup script for the probscale package
#
# Usage: python setup.py install
import os
from setuptools import setup, find_packages
DESCRIPTION = "mpl-probscale: Probabily scales for matplotlib"
LONG_DESCRIPTION = DESCRIPTION
NAME = "probscale"
VERSION = "0.2.3"
AUTHOR = "Paul Hobson (Geosyntec Consultants)"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/matplotlib/mpl-probscale"
DOWNLOAD_URL = "https://github.com/matplotlib/mpl-probscale/archive/master.zip"
LICENSE = "BSD 3-clause"
PACKAGES = find_packages()
PLATFORMS = "Python 2.7, 3.4 and later."
CLASSIFIERS = [
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Libraries :: Python Modules",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
INSTALL_REQUIRES = ['numpy', 'matplotlib']
PACKAGE_DATA = {
'probscale.tests.baseline_images.test_viz': ['*png'],
'probscale.tests.baseline_images.test_probscale': ['*png'],
}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=PACKAGES,
package_data=PACKAGE_DATA,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
zip_safe=False,
)
| bsd-3-clause |
dsm054/pandas | pandas/tests/sparse/test_pivot.py | 1 | 2465 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestPivotTable(object):
def setup_method(self, method):
self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': [np.nan, np.nan, 1, 2,
np.nan, 1, np.nan, np.nan]})
self.sparse = self.dense.to_sparse()
def test_pivot_table(self):
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values='C')
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values='C')
tm.assert_frame_equal(res_sparse, res_dense)
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values='E')
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values='E')
tm.assert_frame_equal(res_sparse, res_dense)
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values='E', aggfunc='mean')
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values='E', aggfunc='mean')
tm.assert_frame_equal(res_sparse, res_dense)
# ToDo: sum doesn't handle nan properly
# res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
# values='E', aggfunc='sum')
# res_dense = pd.pivot_table(self.dense, index='A', columns='B',
# values='E', aggfunc='sum')
# tm.assert_frame_equal(res_sparse, res_dense)
def test_pivot_table_multi(self):
res_sparse = pd.pivot_table(self.sparse, index='A', columns='B',
values=['D', 'E'])
res_dense = pd.pivot_table(self.dense, index='A', columns='B',
values=['D', 'E'])
res_dense = res_dense.apply(lambda x: x.astype("Sparse[float64]"))
tm.assert_frame_equal(res_sparse, res_dense)
| bsd-3-clause |
wronk/mne-python | tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py | 3 | 10216 | """
.. _tut_stats_cluster_sensor_rANOVA_tfr:
====================================================================
Mass-univariate twoway repeated measures ANOVA on single trial power
====================================================================
This script shows how to conduct a mass-univariate repeated measures
ANOVA. As the model to be fitted assumes two fully crossed factors,
we will study the interplay between perceptual modality
(auditory VS visual) and the location of stimulus presentation
(left VS right). Here we use single trials as replications
(subjects) while iterating over time slices plus frequency bands
to fit our mass-univariate model. For the sake of simplicity we
will confine this analysis to a single channel that we know
exposes a strong induced response. We will then visualize
each effect by creating a corresponding mass-univariate effect
image. We conclude by accounting for multiple comparisons,
performing a permutation clustering test with the ANOVA as the
clustering function. The final results are then compared to
those obtained with False Discovery Rate correction.
"""
# Authors: Denis Engemann <[email protected]>
# Eric Larson <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import f_threshold_mway_rm, f_mway_rm, fdr_correction
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443'] # bads
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = raw.info['ch_names'][picks[0]]
# Load conditions
reject = dict(grad=4000e-13, eog=150e-6)
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject)
###############################################################################
# We have to make sure all conditions have the same counts, as the ANOVA
# expects a fully balanced data matrix and does not forgive imbalances that
# generously (risk of type-I error).
epochs.equalize_event_counts(event_id, copy=False)
# Time vector
times = 1e3 * epochs.times # change unit to ms
# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power.
decim = 2
frequencies = np.arange(7, 30, 3) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
n_cycles = frequencies / frequencies[0]
baseline_mask = times[::decim] < 0
###############################################################################
# Create TFR representations for all conditions
# ---------------------------------------------
epochs_power = list()
for condition in [epochs[k].get_data()[:, 97:98, :] for k in event_id]:
this_power = single_trial_power(condition, sfreq=sfreq,
frequencies=frequencies, n_cycles=n_cycles,
decim=decim)
this_power = this_power[:, 0, :, :] # we only have one channel.
# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
epochs_baseline = np.mean(this_power[:, :, baseline_mask], axis=2)
this_power /= epochs_baseline[..., np.newaxis]
epochs_power.append(this_power)
###############################################################################
# Setup repeated measures ANOVA
# -----------------------------
#
# We will tell the ANOVA how to interpret the data matrix in terms of factors.
# This is done via the factor levels argument which is a list of the number
# factor levels for each factor.
n_conditions = len(epochs.event_id)
n_replications = epochs.events.shape[0] / n_conditions
factor_levels = [2, 2] # number of levels in each factor
effects = 'A*B' # this is the default signature for computing all effects
# Other possible options are 'A' or 'B' for the corresponding main effects
# or 'A:B' for the interaction effect only (this notation is borrowed from the
# R formula language)
n_frequencies = len(frequencies)
n_times = len(times[::decim])
###############################################################################
# Now we'll assemble the data matrix and swap axes so the trial replications
# are the first dimension and the conditions are the second dimension.
data = np.swapaxes(np.asarray(epochs_power), 1, 0)
# reshape last two dimensions in one mass-univariate observation-vector
data = data.reshape(n_replications, n_conditions, n_frequencies * n_times)
# so we have replications * conditions * observations:
print(data.shape)
###############################################################################
# While the iteration scheme used above for assembling the data matrix
# makes sure the first two dimensions are organized as expected (with A =
# modality and B = location):
#
# .. table::
#
# ===== ==== ==== ==== ====
#    trial A1B1 A1B2 A2B1 A2B2
# ===== ==== ==== ==== ====
# 1 1.34 2.53 0.97 1.74
# ... .... .... .... ....
# 56 2.45 7.90 3.09 4.76
# ===== ==== ==== ==== ====
#
# Now we're ready to run our repeated measures ANOVA.
#
# Note. As we treat trials as subjects, the test only accounts for
# time-locked responses despite the 'induced' approach.
# For analyses of induced power at the group level, averaged TFRs
# are required.
fvals, pvals = f_mway_rm(data, factor_levels, effects=effects)
effect_labels = ['modality', 'location', 'modality by location']
# let's visualize our effects by computing f-images
for effect, sig, effect_label in zip(fvals, pvals, effect_labels):
plt.figure()
# show naive F-values in gray
plt.imshow(effect.reshape(8, 211), cmap=plt.cm.gray, extent=[times[0],
times[-1], frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
# create mask for significant Time-frequency locations
effect = np.ma.masked_array(effect, [sig > .05])
plt.imshow(effect.reshape(8, 211), cmap='RdBu_r', extent=[times[0],
times[-1], frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
plt.colorbar()
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(r"Time-locked response for '%s' (%s)" % (effect_label, ch_name))
plt.show()
###############################################################################
# Account for multiple comparisons using FDR versus permutation clustering test
# -----------------------------------------------------------------------------
#
# First we need to slightly modify the ANOVA function to be suitable for
# the clustering procedure. We also want to set some defaults.
# Let's first override effects to confine the analysis to the interaction
effects = 'A:B'
###############################################################################
# A stat_fun must deal with a variable number of input arguments.
# Inside the clustering function each condition will be passed as a flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
# The following function catches the list input and swaps the first and
# the second dimension and finally calls the ANOVA function.
def stat_fun(*args):
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=False)[0]
# The ANOVA returns a tuple f-values and p-values, we will pick the former.
pthresh = 0.00001 # set threshold rather high to save some time
f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects,
pthresh)
tail = 1 # f-test, so tail > 0
n_permutations = 256 # Save some time (the test won't be too sensitive ...)
T_obs, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
epochs_power, stat_fun=stat_fun, threshold=f_thresh, tail=tail, n_jobs=1,
n_permutations=n_permutations, buffer_size=None)
###############################################################################
# Create new stats image with only significant clusters
# -----------------------------------------------------
good_clusters = np.where(cluster_p_values < .05)[0]
T_obs_plot = np.ma.masked_array(T_obs,
                                np.invert(clusters[np.squeeze(good_clusters)]))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Time-locked response for \'modality by location\' (%s)\n'
' cluster-level corrected (p <= 0.05)' % ch_name)
plt.show()
###############################################################################
# Now using FDR
# -------------
mask, _ = fdr_correction(pvals[2])
T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask))
plt.figure()
for f_image, cmap in zip([T_obs, T_obs_plot2], [plt.cm.gray, 'RdBu_r']):
plt.imshow(f_image, cmap=cmap, extent=[times[0], times[-1],
frequencies[0], frequencies[-1]], aspect='auto',
origin='lower')
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Time-locked response for \'modality by location\' (%s)\n'
' FDR corrected (p <= 0.05)' % ch_name)
plt.show()
# Both cluster-level and FDR correction help get rid of the
# putative spots we saw in the naive f-images.
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/groupby/test_groupby_dropna.py | 1 | 7193 | import numpy as np
import pytest
import pandas as pd
import pandas.testing as tm
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [13.0, 123.23], "d": [13.0, 123.0], "e": [13.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"]],
{
"c": [13.0, 12.3, 123.23],
"d": [13.0, 233.0, 123.0],
"e": [13.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
dropna, tuples, outputs, nulls_fixture
):
# GH 3729 this is to test that NA is in one group
df_list = [
["A", "B", 12, 12, 12],
["A", nulls_fixture, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
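    # (MultiIndex stores missing values with code -1 and leaves them out of the
    # levels, which is why NaN must be put back explicitly when dropna=False.)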
if not dropna:
mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [12.0, 123.23], "d": [12.0, 123.0], "e": [12.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"], [np.nan, "B"]],
{
"c": [12.0, 13.3, 123.23, 1.0],
"d": [12.0, 234.0, 123.0, 1.0],
"e": [12.0, 13.0, 1.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
dropna, tuples, outputs, nulls_fixture, nulls_fixture2
):
    # GH 3729 this is to test NA values in different groups with different representations
df_list = [
["A", "B", 12, 12, 12],
["A", nulls_fixture, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
[nulls_fixture2, "B", 1, 1, 1.0],
["A", nulls_fixture2, 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, idx, outputs",
[
(True, ["A", "B"], {"b": [123.23, 13.0], "c": [123.0, 13.0], "d": [1.0, 13.0]}),
(
False,
["A", "B", np.nan],
{
"b": [123.23, 13.0, 12.3],
"c": [123.0, 13.0, 233.0],
"d": [1.0, 13.0, 12.0],
},
),
],
)
def test_groupby_dropna_normal_index_dataframe(dropna, idx, outputs):
# GH 3729
df_list = [
["B", 12, 12, 12],
[None, 12.3, 233.0, 12],
["A", 123.23, 123, 1],
["B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d"])
grouped = df.groupby("a", dropna=dropna).sum()
expected = pd.DataFrame(outputs, index=pd.Index(idx, dtype="object", name="a"))
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"dropna, idx, expected",
[
(True, ["a", "a", "b", np.nan], pd.Series([3, 3], index=["a", "b"])),
(
False,
["a", "a", "b", np.nan],
pd.Series([3, 3, 3], index=["a", "b", np.nan]),
),
],
)
def test_groupby_dropna_series_level(dropna, idx, expected):
ser = pd.Series([1, 2, 3, 3], index=idx)
result = ser.groupby(level=0, dropna=dropna).sum()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, expected",
[
(True, pd.Series([210.0, 350.0], index=["a", "b"], name="Max Speed")),
(
False,
pd.Series([210.0, 350.0, 20.0], index=["a", "b", np.nan], name="Max Speed"),
),
],
)
def test_groupby_dropna_series_by(dropna, expected):
ser = pd.Series(
[390.0, 350.0, 30.0, 20.0],
index=["Falcon", "Falcon", "Parrot", "Parrot"],
name="Max Speed",
)
result = ser.groupby(["a", "b", "a", np.nan], dropna=dropna).mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dropna, tuples, outputs",
[
(
True,
[["A", "B"], ["B", "A"]],
{"c": [13.0, 123.23], "d": [12.0, 123.0], "e": [1.0, 1.0]},
),
(
False,
[["A", "B"], ["A", np.nan], ["B", "A"]],
{
"c": [13.0, 12.3, 123.23],
"d": [12.0, 233.0, 123.0],
"e": [1.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs):
# GH 3729
df_list = [
["A", "B", 12, 12, 12],
["A", None, 12.3, 233.0, 12],
["B", "A", 123.23, 123, 1],
["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
agg_dict = {"c": sum, "d": max, "e": "min"}
grouped = df.groupby(["a", "b"], dropna=dropna).agg(agg_dict)
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
@pytest.mark.parametrize(
"datetime1, datetime2",
[
(pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")),
(pd.Timedelta("-2 days"), pd.Timedelta("-1 days")),
(pd.Period("2020-01-01"), pd.Period("2020-02-01")),
],
)
@pytest.mark.parametrize(
"dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)],
)
def test_groupby_dropna_datetime_like_data(
dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2
):
# 3729
df = pd.DataFrame(
{
"values": [1, 2, 3, 4, 5, 6],
"dt": [
datetime1,
unique_nulls_fixture,
datetime2,
unique_nulls_fixture2,
datetime1,
datetime1,
],
}
)
if dropna:
indexes = [datetime1, datetime2]
else:
indexes = [datetime1, datetime2, np.nan]
grouped = df.groupby("dt", dropna=dropna).agg({"values": sum})
expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt"))
tm.assert_frame_equal(grouped, expected)
| bsd-3-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tseries/tests/test_frequencies.py | 9 | 25284 | from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=1)), 4000)
self.assertEqual(frequencies.get_freq_group(offsets.Week(weekday=5)), 4000)
def test_get_to_timestamp_base(self):
tsb = frequencies.get_to_timestamp_base
self.assertEqual(tsb(frequencies.get_freq_code('D')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('W')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('M')[0]),
frequencies.get_freq_code('D')[0])
self.assertEqual(tsb(frequencies.get_freq_code('S')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('T')[0]),
frequencies.get_freq_code('S')[0])
self.assertEqual(tsb(frequencies.get_freq_code('H')[0]),
frequencies.get_freq_code('S')[0])
def test_freq_to_reso(self):
Reso = frequencies.Resolution
self.assertEqual(Reso.get_str_from_freq('A'), 'year')
self.assertEqual(Reso.get_str_from_freq('Q'), 'quarter')
self.assertEqual(Reso.get_str_from_freq('M'), 'month')
self.assertEqual(Reso.get_str_from_freq('D'), 'day')
self.assertEqual(Reso.get_str_from_freq('H'), 'hour')
self.assertEqual(Reso.get_str_from_freq('T'), 'minute')
self.assertEqual(Reso.get_str_from_freq('S'), 'second')
self.assertEqual(Reso.get_str_from_freq('L'), 'millisecond')
self.assertEqual(Reso.get_str_from_freq('U'), 'microsecond')
self.assertEqual(Reso.get_str_from_freq('N'), 'nanosecond')
for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']:
# check roundtrip
result = Reso.get_freq(Reso.get_str_from_freq(freq))
self.assertEqual(freq, result)
for freq in ['D', 'H', 'T', 'S', 'L', 'U']:
result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq)))
self.assertEqual(freq, result)
def test_get_freq_code(self):
# freqstr
self.assertEqual(frequencies.get_freq_code('A'),
(frequencies.get_freq('A'), 1))
self.assertEqual(frequencies.get_freq_code('3D'),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code('-2M'),
(frequencies.get_freq('M'), -2))
# tuple
self.assertEqual(frequencies.get_freq_code(('D', 1)),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(('A', 3)),
(frequencies.get_freq('A'), 3))
self.assertEqual(frequencies.get_freq_code(('M', -2)),
(frequencies.get_freq('M'), -2))
# numeric tuple
self.assertEqual(frequencies.get_freq_code((1000, 1)), (1000, 1))
# offsets
self.assertEqual(frequencies.get_freq_code(offsets.Day()),
(frequencies.get_freq('D'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Day(3)),
(frequencies.get_freq('D'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Day(-2)),
(frequencies.get_freq('D'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd()),
(frequencies.get_freq('M'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(3)),
(frequencies.get_freq('M'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.MonthEnd(-2)),
(frequencies.get_freq('M'), -2))
self.assertEqual(frequencies.get_freq_code(offsets.Week()),
(frequencies.get_freq('W'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3)),
(frequencies.get_freq('W'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2)),
(frequencies.get_freq('W'), -2))
# monday is weekday=0
self.assertEqual(frequencies.get_freq_code(offsets.Week(weekday=1)),
(frequencies.get_freq('W-TUE'), 1))
self.assertEqual(frequencies.get_freq_code(offsets.Week(3, weekday=0)),
(frequencies.get_freq('W-MON'), 3))
self.assertEqual(frequencies.get_freq_code(offsets.Week(-2, weekday=4)),
(frequencies.get_freq('W-FRI'), -2))
_dti = DatetimeIndex
class TestFrequencyInference(tm.TestCase):
def test_raise_if_period_index(self):
index = PeriodIndex(start="1/1/1990", periods=20, freq="M")
self.assertRaises(TypeError, frequencies.infer_freq, index)
def test_raise_if_too_few(self):
index = _dti(['12/31/1998', '1/3/1999'])
self.assertRaises(ValueError, frequencies.infer_freq, index)
def test_business_daily(self):
index = _dti(['12/31/1998', '1/3/1999', '1/4/1999'])
self.assertEqual(frequencies.infer_freq(index), 'B')
def test_day(self):
self._check_tick(timedelta(1), 'D')
def test_day_corner(self):
index = _dti(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(index), 'D')
def test_non_datetimeindex(self):
dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000'])
self.assertEqual(frequencies.infer_freq(dates), 'D')
def test_hour(self):
self._check_tick(timedelta(hours=1), 'H')
def test_minute(self):
self._check_tick(timedelta(minutes=1), 'T')
def test_second(self):
self._check_tick(timedelta(seconds=1), 'S')
def test_millisecond(self):
self._check_tick(timedelta(microseconds=1000), 'L')
def test_microsecond(self):
self._check_tick(timedelta(microseconds=1), 'U')
def test_nanosecond(self):
self._check_tick(np.timedelta64(1, 'ns'), 'N')
def _check_tick(self, base_delta, code):
b = Timestamp(datetime.now())
for i in range(1, 5):
inc = base_delta * i
index = _dti([b + inc * j for j in range(3)])
if i > 1:
exp_freq = '%d%s' % (i, code)
else:
exp_freq = code
self.assertEqual(frequencies.infer_freq(index), exp_freq)
index = _dti([b + base_delta * 7] +
[b + base_delta * j for j in range(3)])
self.assertIsNone(frequencies.infer_freq(index))
index = _dti([b + base_delta * j for j in range(3)] +
[b + base_delta * 7])
self.assertIsNone(frequencies.infer_freq(index))
def test_weekly(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
self._check_generated_range('1/1/2000', 'W-%s' % day)
def test_week_of_month(self):
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
for day in days:
for i in range(1, 5):
self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day))
def test_fifth_week_of_month(self):
# Only supports freq up to WOM-4. See #9425
func = lambda: date_range('2014-01-01', freq='WOM-5MON')
self.assertRaises(ValueError, func)
def test_fifth_week_of_month_infer(self):
# Only attempts to infer up to WOM-4. See #9425
index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
assert frequencies.infer_freq(index) is None
def test_week_of_month_fake(self):
        # All of these dates are on the same day of week and are 4 or 5 weeks apart
index = DatetimeIndex(["2013-08-27","2013-10-01","2013-10-29","2013-11-26"])
assert frequencies.infer_freq(index) != 'WOM-4TUE'
def test_monthly(self):
self._check_generated_range('1/1/2000', 'M')
def test_monthly_ambiguous(self):
rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000'])
self.assertEqual(rng.inferred_freq, 'M')
def test_business_monthly(self):
self._check_generated_range('1/1/2000', 'BM')
def test_business_start_monthly(self):
self._check_generated_range('1/1/2000', 'BMS')
def test_quarterly(self):
for month in ['JAN', 'FEB', 'MAR']:
self._check_generated_range('1/1/2000', 'Q-%s' % month)
def test_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'A-%s' % month)
def test_business_annual(self):
for month in MONTHS:
self._check_generated_range('1/1/2000', 'BA-%s' % month)
def test_annual_ambiguous(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
self.assertEqual(rng.inferred_freq, 'A-JAN')
def _check_generated_range(self, start, freq):
freq = freq.upper()
gen = date_range(start, periods=7, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
gen = date_range(start, periods=5, freq=freq)
index = _dti(gen.values)
if not freq.startswith('Q-'):
self.assertEqual(frequencies.infer_freq(index), gen.freqstr)
else:
inf_freq = frequencies.infer_freq(index)
self.assertTrue((inf_freq == 'Q-DEC' and
gen.freqstr in ('Q', 'Q-DEC', 'Q-SEP', 'Q-JUN',
'Q-MAR'))
or
(inf_freq == 'Q-NOV' and
gen.freqstr in ('Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB'))
or
(inf_freq == 'Q-OCT' and
gen.freqstr in ('Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN')))
def test_infer_freq(self):
rng = period_range('1959Q2', '2009Q3', freq='Q')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-DEC')
rng = period_range('1959Q2', '2009Q3', freq='Q-NOV')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-NOV')
rng = period_range('1959Q2', '2009Q3', freq='Q-OCT')
rng = Index(rng.to_timestamp('D', how='e').asobject)
self.assertEqual(rng.inferred_freq, 'Q-OCT')
def test_infer_freq_tz(self):
freqs = {'AS-JAN': ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'],
'Q-OCT': ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'],
'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'],
'W-SAT': ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'],
'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'],
'H': ['2011-12-31 22:00', '2011-12-31 23:00', '2012-01-01 00:00', '2012-01-01 01:00']
}
# GH 7310
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for expected, dates in compat.iteritems(freqs):
idx = DatetimeIndex(dates, tz=tz)
self.assertEqual(idx.inferred_freq, expected)
def test_infer_freq_tz_transition(self):
# Tests for #8772
date_pairs = [['2013-11-02', '2013-11-5'], #Fall DST
['2014-03-08', '2014-03-11'], #Spring DST
['2014-01-01', '2014-01-03']] #Regular Time
freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', '3600000000001N']
for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris',
'US/Pacific', 'US/Eastern']:
for date_pair in date_pairs:
for freq in freqs:
idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
self.assertEqual(idx.inferred_freq, freq)
index = date_range("2013-11-03", periods=5, freq="3H").tz_localize("America/Chicago")
self.assertIsNone(index.inferred_freq)
def test_infer_freq_businesshour(self):
# GH 7905
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00'])
# hourly freq in a day must result in 'H'
self.assertEqual(idx.inferred_freq, 'H')
idx = DatetimeIndex(['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00',
'2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00',
'2014-07-01 15:00', '2014-07-01 16:00',
'2014-07-02 09:00', '2014-07-02 10:00', '2014-07-02 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00'])
self.assertEqual(idx.inferred_freq, 'BH')
idx = DatetimeIndex(['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00',
'2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00',
'2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00', '2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00', '2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00', '2014-07-07 16:00',
'2014-07-08 09:00', '2014-07-08 10:00', '2014-07-08 11:00',
'2014-07-08 12:00', '2014-07-08 13:00', '2014-07-08 14:00',
'2014-07-08 15:00', '2014-07-08 16:00'])
self.assertEqual(idx.inferred_freq, 'BH')
def test_not_monotonic(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
rng = rng[::-1]
self.assertEqual(rng.inferred_freq, '-1A-JAN')
    def test_non_datetimeindex2(self):
rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002'])
vals = rng.to_pydatetime()
result = frequencies.infer_freq(vals)
self.assertEqual(result, rng.inferred_freq)
def test_invalid_index_types(self):
# test all index types
for i in [ tm.makeIntIndex(10),
tm.makeFloatIndex(10),
tm.makePeriodIndex(10) ]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(i))
# GH 10822
# odd error message on conversions to datetime for unicode
if not is_platform_windows():
for i in [ tm.makeStringIndex(10),
tm.makeUnicodeIndex(10) ]:
self.assertRaises(ValueError, lambda : frequencies.infer_freq(i))
def test_string_datetimelike_compat(self):
# GH 6463
expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', '2004-04'])
result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', '2004-04']))
self.assertEqual(result,expected)
def test_series(self):
# GH6407
# inferring series
# invalid type of Series
for s in [ Series(np.arange(10)),
Series(np.arange(10.))]:
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# a non-convertible string
self.assertRaises(ValueError, lambda : frequencies.infer_freq(Series(['foo','bar'])))
# cannot infer on PeriodIndex
for freq in [None, 'L']:
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
for freq in ['Y']:
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s = Series(period_range('2013',periods=10,freq=freq))
self.assertRaises(TypeError, lambda : frequencies.infer_freq(s))
# DateTimeIndex
for freq in ['M', 'L', 'S']:
s = Series(date_range('20130101',periods=10,freq=freq))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,freq)
s = Series(date_range('20130101','20130110'))
inferred = frequencies.infer_freq(s)
self.assertEqual(inferred,'D')
def test_legacy_offset_warnings(self):
for k, v in compat.iteritems(frequencies._rule_aliases):
with tm.assert_produces_warning(FutureWarning):
result = frequencies.get_offset(k)
exp = frequencies.get_offset(v)
self.assertEqual(result, exp)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
idx = date_range('2011-01-01', periods=5, freq=k)
exp = date_range('2011-01-01', periods=5, freq=v)
self.assert_index_equal(idx, exp)
MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP',
'OCT', 'NOV', 'DEC']
def test_is_superperiod_subperiod():
assert(frequencies.is_superperiod(offsets.YearEnd(), offsets.MonthEnd()))
assert(frequencies.is_subperiod(offsets.MonthEnd(), offsets.YearEnd()))
assert(frequencies.is_superperiod(offsets.Hour(), offsets.Minute()))
assert(frequencies.is_subperiod(offsets.Minute(), offsets.Hour()))
assert(frequencies.is_superperiod(offsets.Second(), offsets.Milli()))
assert(frequencies.is_subperiod(offsets.Milli(), offsets.Second()))
assert(frequencies.is_superperiod(offsets.Milli(), offsets.Micro()))
assert(frequencies.is_subperiod(offsets.Micro(), offsets.Milli()))
assert(frequencies.is_superperiod(offsets.Micro(), offsets.Nano()))
assert(frequencies.is_subperiod(offsets.Nano(), offsets.Micro()))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
rstebbing/subdivision | verification/doosabin/derivatives_symbolic.py | 1 | 4196 | ##########################################
# File: derivatives_symbolic.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import argparse
import numpy as np
import sympy as sp
from itertools import count
import matplotlib.pyplot as plt
# Note: Module is imported directly for access to
# `NUM_BIQUADRATIC_BSPLINE_BASIS` and other non-exported variables which aren't
# (typically) necessary.
from subdivision.doosabin import doosabin
from common import example_extraordinary_patch
# Requires `rscommon`.
from rscommon.matplotlib_ import set_xaxis_ticks
# Use Sympy as the backend for `doosabin`.
doosabin.g = sp
# Global symbols.
u, v = sp.symbols('u v')
# recursive_evaluate_basis
def recursive_evaluate_basis(p, b, N, k):
A_ = sp.Matrix(doosabin.bigger_subdivision_matrix(N))
P3 = sp.Matrix(doosabin.picker_matrix(N, 3))
A_Anm1 = A_
for i in count(1):
yield 2**(p * i) * b.T * (
sp.Matrix(doosabin.picker_matrix(N, k)) * A_Anm1)
A_Anm1 = A_ * P3 * A_Anm1
# du_k_0
def du_k_0(N):
# `b` and `p` corresponding to `biquadratic_bspline_du_basis` for
# `v = 0`.
p = 1
b = sp.Matrix([
doosabin.biquadratic_bspline_basis_i(
doosabin.uniform_quadratic_bspline_first_derivative_basis,
doosabin.uniform_quadratic_bspline_position_basis,
u, v, i)
for i in range(doosabin.NUM_BIQUADRATIC_BSPLINE_BASIS)
]).subs({v : 0})
return recursive_evaluate_basis(p, b, N, 0)
# du_du_k_0
def du_du_k_0(N):
# `b` and `p` corresponding to `biquadratic_bspline_du_du_basis`.
p = 2
h = sp.S.Half
b = sp.Matrix([-1, -1, h, h, 0, 0, 0, h, h])
return recursive_evaluate_basis(p, b, N, 0)
# main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('N', nargs='?', type=int, default=6)
parser.add_argument('n', nargs='?', type=int, default=16)
parser.add_argument('--seed', type=int, default=-1)
args = parser.parse_args()
# Generate example extraordinary patch with an extraordinary face of `N`
# sides.
# Use `seed < 0` to signal evaluating the linear weights only
# (i.e. `X = None`).
print 'N:', args.N
print 'seed:', args.seed
if args.seed >= 0:
X = example_extraordinary_patch(args.N)
np.random.seed(args.seed)
X += 0.1 * np.random.randn(X.size).reshape(X.shape)
print 'X:', X.shape
else:
X = None
print 'X: None'
generators_and_subs = [('biquadratic_bspline_du_basis', du_k_0, {u : 1}),
('biquadratic_bspline_du_du_basis', du_du_k_0, {})]
f, axs = plt.subplots(2, 1)
for ax, (name, g, subs) in zip(axs, generators_and_subs):
print '%s:' % name
# Ignore first subdivision so that the reported results correspond with
# those generated by derivatives_numeric.py (which implicitly skips the
# first subdivision due to the calculation for `n` in
# `transform_u_to_subdivided_patch`).
g = g(args.N)
next(g)
norms = []
for i in range(args.n):
b = next(g).subs(subs)
q = map(np.float64, b)
if X is not None:
q = np.dot(q, X)
n = np.linalg.norm(q)
norms.append(n)
print (' (2^%d, 0) ->' % (-(i + 1))),
if X is not None:
print ('(%+.3e, %+.3e)' % (q[0], q[1])),
print '<%.3e>' % n
ax.plot(norms, 'o-')
for i, n in enumerate(norms):
ax.text(i, n, '$%.3e$' % n, horizontalalignment='center')
set_xaxis_ticks(ax, ['$2^{%d}$' % (-(i + 1)) for i in range(args.n)])
ax.set_yticks([])
axs[0].set_title(r'$|N = %d, \partial u|$' % args.N)
axs[1].set_title(r'$|N = %d, \partial u^2|$' % args.N)
plt.show()
if __name__ == '__main__':
main()
| mit |
lheagy/casingResearch | setup.py | 2 | 1719 | #!/usr/bin/env python
from __future__ import print_function
"""CasingSimulations: Numerical simulations of electromagnetic surveys over
in settings where steel cased wells are present.
"""
from distutils.core import setup
from setuptools import find_packages
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Natural Language :: English',
]
with open("README.rst") as f:
LONG_DESCRIPTION = ''.join(f.readlines())
setup(
name="CasingSimulations",
version="0.1.0",
packages=find_packages(),
install_requires=[
'numpy>=1.7',
'scipy>=0.13',
'cython',
'pymatsolver>=0.1.1',
'ipywidgets',
'jupyter',
'matplotlib',
'properties',
'vectormath',
'SimPEG',
],
author="Lindsey Heagy",
author_email="[email protected]",
description="Casing Simulations: Electromagnetics + Steel Cased Wells",
long_description=LONG_DESCRIPTION,
license="MIT",
keywords="geophysics electromagnetics",
url="http://github.com/simpeg-research/casingResearch",
download_url="http://github.com/simpeg-research/casingResearch",
classifiers=CLASSIFIERS,
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
use_2to3=False
)
| mit |
dinrker/Ray_FEM | Plots/python_scripts/paper1/plot_scaling_NMLA.py | 1 | 2442 | import numpy as np
size = np.array([45.0,
55.0,
65.0,
75.0,
85.0,
95.0,
105.0,
110.0,
120.0,
135.0,
145.0,
155.0,
160.0,
170.0,
185.0,
195.0,
205.0,
215.0,
225.0])
timing_NMLA = np.array([ 2.9972,
2.6158,
8.4915,
9.0110,
8.9726,
9.0912,
9.5124,
9.2398,
9.6367,
10.143,
9.2135,
9.9841,
20.423,
22.343,
22.594,
25.456,
24.850,
21.250,
21.9610])
# timing_LowFact = np.array([12.69,
# 21.37,
# 29.35,
# 41.82,
# 51.83,
# 69.76,
# 81.53,
# 104.58,
# 112.07,
# 124.82,
# 178.83,
# 211.56,
# 216.16,
# 289.90,
# 324.63,
# 282.74,
# 417.06,
# 461.85,
# 469.43 ])
# timing_gauss = np.array([ 1.218088277111111,
# 5.649571164,
# 8.722732030944444,
# 20.264415925555554,
# 34.40783213327778,
# 56.12274424983333,
# 70.80124191294445,
# 94.6494892356111,
# 148.3274009705])
import matplotlib.pyplot as plt
golden = 1.61803398875
width = 6
height = width/golden
fig = plt.figure(figsize=(width, height))
xlabels = size**2;
plt.loglog(xlabels, timing_NMLA, label='Probing', color='b', linewidth=2, linestyle='--', marker='.', markersize=8.0, zorder=2)
#plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
# plt.plt.loglog(xlabels, timing_LowFreq, label='LowFrequency', color='b', linewidth=2, linestyle='--', marker='.', markersize=8.0, zorder=2)
#plt.loglog(size**2, timing_LowFact, label='Setup', color='g', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
# plt.loglog(size**2, timing_gauss, label='Gaussian bumps', color='g', linewidth=2, linestyle='--', marker='.', markersize=8.0, zorder=2)
# plt.loglog(xlabels, (xlabels*np.log(xlabels)**3/(xlabels[0]*np.log(xlabels[0])**3))*timing_LowFreq[0]*0.95, label=r'$\mathcal{O}(N \log^2{N})$', color='k', linewidth=2, linestyle='solid', markersize=8.0, zorder=2)
# #plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.loglog(xlabels, (xlabels*np.log(xlabels)/(xlabels[0]*np.log(xlabels[0])))*timing_NMLA[0]*0.95, label=r'$\mathcal{O}(N \log{N})$', color='r', linewidth=2, linestyle='solid', markersize=8.0, zorder=2)
# # plt.loglog(N_x**2, N_x**2 / 4.0e4, label=r' ', color='white', linewidth=0.0)
plt.legend(loc=2, ncol=1, frameon=False, fontsize=14.85)
# plt.title('Normalized run-time for inner loop')
plt.xlabel(r'$N=n^2$', fontsize=18)
plt.ylabel('Time [s]', fontsize=18)
plt.gca().tick_params(labelsize=14)
plt.autoscale(True, 'both', True)
plt.tight_layout(pad=0.2)
fig.savefig('Probing_scaling.pdf')
plt.close('all')
# plt.show()
| mit |
richford/AFQ-Browser | doc/sphinxext/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
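# Example usage (illustrative only; `np.mean` is merely a stand-in target object):
#
#     import numpy as np
#     doc = get_doc_object(np.mean, config={'use_plots': False})
#     rst_text = str(doc)  # reStructuredText ready to feed to Sphinx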
| bsd-3-clause |
shaneknapp/spark | python/pyspark/pandas/generic.py | 2 | 104770 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class of DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections import Counter
from distutils.version import LooseVersion
from functools import reduce
from typing import (
Any,
Callable,
Iterable,
IO,
List,
Optional,
NoReturn,
Tuple,
Union,
TYPE_CHECKING,
cast,
)
import warnings
import numpy as np # noqa: F401
import pandas as pd
from pandas.api.types import is_list_like
from pyspark.sql import Column, functions as F
from pyspark.sql.types import (
BooleanType,
DataType,
DoubleType,
FloatType,
IntegralType,
LongType,
NumericType,
)
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, FrameLike, Scalar
from pyspark.pandas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from pyspark.pandas.internal import InternalFrame
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import spark_type_to_pandas_dtype
from pyspark.pandas.utils import (
is_name_like_tuple,
is_name_like_value,
name_like_string,
scol_for,
sql_conf,
validate_arguments_and_invoke_function,
validate_axis,
SPARK_CONF_ARROW_ENABLED,
)
if TYPE_CHECKING:
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.indexes.base import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.groupby import GroupBy # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
from pyspark.pandas.window import Rolling, Expanding # noqa: F401 (SPARK-34943)
bool_type = bool
class Frame(object, metaclass=ABCMeta):
"""
The base class for both DataFrame and Series.
"""
@abstractmethod
def __getitem__(self, key: Any) -> Any:
pass
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@abstractmethod
def _apply_series_op(
self: FrameLike,
op: Callable[["Series"], Union["Series", Column]],
should_resolve: bool = False,
) -> FrameLike:
pass
@abstractmethod
def _reduce_for_stat_function(
self,
sfun: Union[Callable[[Column], Column], Callable[[Column, DataType], Column]],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any
) -> Union["Series", Scalar]:
pass
@property
@abstractmethod
def dtypes(self) -> Union[pd.Series, Dtype]:
pass
@abstractmethod
def to_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@property
@abstractmethod
def index(self) -> "Index":
pass
@abstractmethod
def copy(self: FrameLike) -> FrameLike:
pass
@abstractmethod
def _to_internal_pandas(self) -> Union[pd.DataFrame, pd.Series]:
pass
@abstractmethod
def head(self: FrameLike, n: int = 5) -> FrameLike:
pass
# TODO: add 'axis' parameter
def cummin(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.min, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cummax(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cum(F.max, skipna), should_resolve=True)
# TODO: add 'axis' parameter
def cumsum(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumsum(skipna), should_resolve=True)
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self: FrameLike, skipna: bool = True) -> FrameLike:
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
        .. note:: unlike pandas, pandas-on-Spark emulates the cumulative product with the
            ``exp(sum(log(...)))`` trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If the values is equal to or lower than 0.
Examples
--------
>>> df = ps.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda psser: psser._cumprod(skipna), should_resolve=True)
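    # Illustrative sketch of the ``exp(sum(log(...)))`` trick referenced in the
    # docstring above (an assumption about how `Series._cumprod` is implemented,
    # not a copy of its actual code): over an unbounded window ``w``, the
    # cumulative product of a strictly positive column ``scol`` can be emulated
    # as ``F.exp(F.sum(F.log(scol)).over(w))``, which is why non-positive values
    # cannot be supported.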
# TODO: Although this has removed pandas >= 1.0.0, but we're keeping this as deprecated
# since we're using this for `DataFrame.info` internally.
# We can drop it once our minimal pandas version becomes 1.0.0.
def get_dtype_counts(self) -> pd.Series:
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ps.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = list(self.dtypes)
return pd.Series(dict(Counter([d.name for d in dtypes])))
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ps.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
        You can use lambda as well
>>> ps.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError("%s is both the pipe target and a keyword " "argument" % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self) -> np.ndarray:
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
>>> ps.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to be used.
>>> ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}).to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will have object dtype.
>>> df = ps.DataFrame({"A": [1, 2], "B": [3.0, 4.5], "C": pd.date_range('2000', periods=2)})
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
For Series,
>>> ps.Series(['a', 'b', 'a']).to_numpy()
array(['a', 'b', 'a'], dtype=object)
"""
return self.to_pandas().values
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame or the Series.
.. warning:: We recommend using `DataFrame.to_numpy()` or `Series.to_numpy()` instead.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results in an array of
the same type.
>>> df = ps.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray
of the broadest type that accommodates these mixed types (e.g., object).
>>> df2 = ps.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 'first'),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 'first'],
['monkey', nan, None]], dtype=object)
For Series,
>>> ps.Series([1, 2, 3]).values
array([1, 2, 3])
>>> ps.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
"""
warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
return self.to_numpy()
def to_csv(
self,
path: Optional[str] = None,
sep: str = ",",
na_rep: str = "",
columns: Optional[List[Union[Any, Tuple]]] = None,
header: bool = True,
quotechar: str = '"',
date_format: Optional[str] = None,
escapechar: Optional[str] = None,
num_files: Optional[int] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
        .. note:: pandas-on-Spark `to_csv` writes files to a path or URI. Unlike pandas,
            pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : int, optional
The number of files to be written in the `path` directory when `path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed as PySpark's CSV options. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
-------
str or None
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.cummax().to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 US 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ps.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
You can preserve the index in the roundtrip as below.
>>> df.set_index("country", append=True, inplace=True)
>>> df.date.to_csv(
... path=r'%s/to_csv/bar.csv' % path,
... num_files=1,
... index_col=["index1", "index2"])
>>> ps.read_csv(
... path=r'%s/to_csv/bar.csv' % path, index_col=["index1", "index2"]
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
index1 index2
... ... 2012-01-31 12:00:00
... ... 2012-02-29 12:00:00
... ... 2012-03-31 12:00:00
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if path is None:
# If path is none, just collect and use pandas's to_csv.
psdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and isinstance(
self, ps.Series
):
# pandas 0.23 does not have the 'columns' parameter in Series.to_csv.
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
header=header,
date_format=date_format,
index=False,
)
else:
return psdf_or_ser.to_pandas().to_csv( # type: ignore
None,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
quotechar=quotechar,
date_format=date_format,
escapechar=escapechar,
index=False,
)
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
if columns is None:
column_labels = psdf._internal.column_labels
else:
column_labels = []
for label in columns:
if not is_name_like_tuple(label):
label = (label,)
if label not in psdf._internal.column_labels:
raise KeyError(name_like_string(label))
column_labels.append(label)
if isinstance(index_col, str):
index_cols = [index_col]
elif index_col is None:
index_cols = []
else:
index_cols = index_col
if header is True and psdf._internal.column_labels_level > 1:
raise ValueError("to_csv only support one-level index column now")
elif isinstance(header, list):
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label)).alias(
new_name
)
for i, (label, new_name) in enumerate(zip(column_labels, header))
]
)
header = True
else:
sdf = psdf.to_spark(index_col) # type: ignore
sdf = sdf.select(
[scol_for(sdf, name_like_string(label)) for label in index_cols]
+ [
scol_for(sdf, str(i) if label is None else name_like_string(label))
for i, label in enumerate(column_labels)
]
)
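# Each Spark partition is written as one part-file, so repartitioning caps the number of output files.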
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(
sep=sep,
nullValue=na_rep,
header=header,
quote=quotechar,
dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar,
)
builder.options(**options).format("csv").save(path)
return None
def to_json(
self,
path: Optional[str] = None,
compression: str = "uncompressed",
num_files: Optional[int] = None,
mode: str = "overwrite",
orient: str = "records",
lines: bool = True,
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> Optional[str]:
"""
Convert the object to a JSON string.
.. note:: pandas-on-Spark `to_json` writes files to a path or URI. Unlike pandas,
pandas-on-Spark respects HDFS properties such as 'fs.default.name'.
.. note:: pandas-on-Spark writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: The output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
lines : bool, default True
If 'orient' is 'records', write out line-delimited JSON format.
Raises ValueError for any other 'orient' value, since other formats are not
list-like. It must always be True for now.
orient : str, default 'records'
It must always be 'records' for now.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : int, optional
The number of files to be written in the `path` directory when `path` is specified.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options: keyword arguments for additional options specific to PySpark.
These kwargs are passed as PySpark's JSON options. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Returns
--------
str or None
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/to_json/foo.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1
index
0 a
1 c
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
if orient != "records":
raise NotImplementedError("orient='records' is supported only for now.")
if path is None:
# If path is none, just collect and use pandas's to_json.
psdf_or_ser = self
pdf = psdf_or_ser.to_pandas() # type: ignore
if isinstance(self, ps.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient="records")
psdf = self
if isinstance(self, ps.Series):
psdf = self.to_frame()
sdf = psdf.to_spark(index_col=index_col) # type: ignore
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("json").save(path)
return None
def to_excel(
self,
excel_writer: Union[str, pd.ExcelWriter],
sheet_name: str = "Sheet1",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Union[str, List[str]]] = None,
header: bool = True,
index: bool = True,
index_label: Optional[Union[str, List[str]]] = None,
startrow: int = 0,
startcol: int = 0,
engine: Optional[str] = None,
merge_cells: bool = True,
encoding: Optional[str] = None,
inf_rep: str = "inf",
verbose: bool = True,
freeze_panes: Optional[Tuple[int, int]] = None,
) -> None:
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ps.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
if isinstance(self, ps.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ps.Series):
f = pd.Series.to_excel
else:
raise TypeError(
"Constructor expects DataFrame or Series; however, " "got [%s]" % (self,)
)
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_excel, f, args
)
def mean(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def mean(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.mean(spark_column)
return self._reduce_for_stat_function(
mean, name="mean", axis=axis, numeric_only=numeric_only
)
def sum(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, np.nan, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.4
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.0
2 3.3
3 0.0
dtype: float64
>>> df.sum(min_count=3)
a 6.0
b NaN
dtype: float64
>>> df.sum(axis=1, min_count=1)
0 1.1
1 2.0
2 3.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].sum()
6.0
>>> df['a'].sum(min_count=3)
6.0
>>> df['b'].sum(min_count=3)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
def sum(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(F.sum(spark_column), SF.lit(0))
return self._reduce_for_stat_function(
sum, name="sum", axis=axis, numeric_only=numeric_only, min_count=min_count
)
def product(
self, axis: Optional[Axis] = None, numeric_only: bool = None, min_count: int = 0
) -> Union[Scalar, "Series"]:
"""
Return the product of the values.
.. note:: unlike pandas', pandas-on-Spark's emulates product by ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
Examples
--------
On a DataFrame:
Non-numeric type columns are not included in the result.
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [10, 20, 30, 40, 50],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> psdf
A B C
0 1 10 a
1 2 20 b
2 3 30 c
3 4 40 d
4 5 50 e
>>> psdf.prod()
A 120
B 12000000
dtype: int64
If there are no numeric type columns, an empty Series is returned.
>>> ps.DataFrame({"key": ['a', 'b', 'c'], "val": ['x', 'y', 'z']}).prod()
Series([], dtype: float64)
On a Series:
>>> ps.Series([1, 2, 3, 4, 5]).prod()
120
By default, the product of an empty or all-NA Series is ``1``
>>> ps.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> ps.Series([]).prod(min_count=1)
nan
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
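# Spark has no product aggregate, so emulate it:
#   - any zero in the column forces the result to 0,
#   - the sign is negative when the count of negative values is odd,
#   - the magnitude is exp(sum(log(abs(x)))), e.g. [2, -3, 4] -> -exp(ln 2 + ln 3 + ln 4) = -24.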
def prod(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
scol = F.min(F.coalesce(spark_column, SF.lit(True))).cast(LongType())
elif isinstance(spark_type, NumericType):
num_zeros = F.sum(F.when(spark_column == 0, 1).otherwise(0))
sign = F.when(
F.sum(F.when(spark_column < 0, 1).otherwise(0)) % 2 == 0, 1
).otherwise(-1)
scol = F.when(num_zeros > 0, 0).otherwise(
sign * F.exp(F.sum(F.log(F.abs(spark_column))))
)
if isinstance(spark_type, IntegralType):
scol = F.round(scol).cast(LongType())
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.coalesce(scol, SF.lit(1))
return self._reduce_for_stat_function(
prod, name="prod", axis=axis, numeric_only=numeric_only, min_count=min_count
)
prod = product
def skew(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def skew(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.skewness(spark_column)
return self._reduce_for_stat_function(
skew, name="skew", axis=axis, numeric_only=numeric_only
)
def kurtosis(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def kurtosis(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return F.kurtosis(spark_column)
return self._reduce_for_stat_function(
kurtosis, name="kurtosis", axis=axis, numeric_only=numeric_only
)
kurt = kurtosis
def min(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.min, name="min", axis=axis, numeric_only=numeric_only
)
def max(
self, axis: Optional[Axis] = None, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility. False is supported; however, the columns should
be all numeric or all non-numeric.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
elif numeric_only is True and axis == 1:
numeric_only = None
return self._reduce_for_stat_function(
F.max, name="max", axis=axis, numeric_only=numeric_only
)
def count(
self, axis: Optional[Axis] = None, numeric_only: bool = False
) -> Union[Scalar, "Series"]:
"""
Count non-NA cells for each column.
The values `None` and `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
numeric_only : bool, default False
If True, include only float, int, boolean columns. This parameter is mainly for
pandas compatibility.
Returns
-------
count : scalar for a Series, and a Series for a DataFrame.
See Also
--------
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ps.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
dtype: int64
On a Series:
>>> df['Person'].count()
5
>>> df['Age'].count()
4
"""
return self._reduce_for_stat_function(
Frame._count_expr, name="count", axis=axis, numeric_only=numeric_only
)
def std(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
dtype: float64
>>> df.std(ddof=0)
a 0.816497
b 0.081650
dtype: float64
On a Series:
>>> df['a'].std()
1.0
>>> df['a'].std(ddof=0)
0.816496580927726
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
return self._reduce_for_stat_function(
std, name="std", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def var(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
dtype: float64
>>> df.var(ddof=0)
a 0.666667
b 0.006667
dtype: float64
On a Series:
>>> df['a'].var()
1.0
>>> df['a'].var(ddof=0)
0.6666666666666666
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def var(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.var_pop(spark_column)
else:
return F.var_samp(spark_column)
return self._reduce_for_stat_function(
var, name="var", axis=axis, numeric_only=numeric_only, ddof=ddof
)
def median(
self, axis: Optional[Axis] = None, numeric_only: bool = None, accuracy: int = 10000
) -> Union[Scalar, "Series"]:
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
1.0 / accuracy is the relative error of the approximation.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ps.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['b'] + 100).median()
103.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
dtype: float64
>>> df.median(axis=1)
0 12.5
1 11.5
2 14.0
3 18.5
4 15.5
dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('y', 'b')] + 100).median()
103.0
"""
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
def median(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
return self._reduce_for_stat_function(
median, name="median", numeric_only=numeric_only, axis=axis
)
def sem(
self, axis: Optional[Axis] = None, ddof: int = 1, numeric_only: bool = None
) -> Union[Scalar, "Series"]:
"""
Return unbiased standard error of the mean over requested axis.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility.
Returns
-------
scalar (for Series) or Series (for DataFrame)
Examples
--------
>>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
>>> psdf
a b
0 1 4
1 2 5
2 3 6
>>> psdf.sem()
a 0.57735
b 0.57735
dtype: float64
>>> psdf.sem(ddof=0)
a 0.471405
b 0.471405
dtype: float64
>>> psdf.sem(axis=1)
0 1.5
1 1.5
2 1.5
dtype: float64
Support for Series
>>> psser = psdf.a
>>> psser
0 1
1 2
2 3
Name: a, dtype: int64
>>> psser.sem()
0.5773502691896258
>>> psser.sem(ddof=0)
0.47140452079103173
"""
assert ddof in (0, 1)
axis = validate_axis(axis)
if numeric_only is None and axis == 0:
numeric_only = True
def std(spark_column: Column, spark_type: DataType) -> Column:
if isinstance(spark_type, BooleanType):
spark_column = spark_column.cast(LongType())
elif not isinstance(spark_type, NumericType):
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if ddof == 0:
return F.stddev_pop(spark_column)
else:
return F.stddev_samp(spark_column)
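# Standard error of the mean: standard deviation divided by sqrt(number of non-null values).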
def sem(spark_column: Column, spark_type: DataType) -> Column:
return std(spark_column, spark_type) / pow(
Frame._count_expr(spark_column, spark_type), 0.5
)
return self._reduce_for_stat_function(
sem, name="sem", numeric_only=numeric_only, axis=axis, ddof=ddof
)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ps.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
6
>>> df = ps.DataFrame(index=[1, 2, None])
>>> df.size
0
"""
num_columns = len(self._internal.data_spark_columns)
if num_columns == 0:
return 0
else:
return len(self) * num_columns # type: ignore
def abs(self: FrameLike) -> FrameLike:
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ps.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ps.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
def abs(psser: "Series") -> Union["Series", Column]:
if isinstance(psser.spark.data_type, BooleanType):
return psser
elif isinstance(psser.spark.data_type, NumericType):
return psser._with_new_scol(
F.abs(psser.spark.column), field=psser._internal.data_fields[0]
)
else:
raise TypeError(
"bad operand type for abs(): {} ({})".format(
spark_type_to_pandas_dtype(psser.spark.data_type),
psser.spark.data_type.simpleString(),
)
)
return self._apply_series_op(abs)
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(
self: FrameLike,
by: Union[Any, Tuple, "Series", List[Union[Any, Tuple, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "GroupBy[FrameLike]":
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
dropna : bool, default True
If True, and if group keys contain NA values,
NA values together with row/column will be dropped.
If False, NA values will also be treated as the key in groups.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
pyspark.pandas.groupby.GroupBy
Examples
--------
>>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
We can also choose to include NA in group keys or not by setting dropna parameter,
the default setting is True:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = ps.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
"""
if isinstance(by, ps.DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
elif isinstance(by, ps.Series):
new_by = [by] # type: List[Union[Tuple, ps.Series]]
elif is_name_like_tuple(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [cast(Tuple, by)]
elif is_name_like_value(by):
if isinstance(self, ps.Series):
raise KeyError(by)
new_by = [(by,)]
elif is_list_like(by):
new_by = []
for key in by:
if isinstance(key, ps.DataFrame):
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
elif isinstance(key, ps.Series):
new_by.append(key)
elif is_name_like_tuple(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append(key)
elif is_name_like_value(key):
if isinstance(self, ps.Series):
raise KeyError(key)
new_by.append((key,))
else:
raise ValueError(
"Grouper for '{}' not 1-dimensional".format(type(key).__name__)
)
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__))
if not len(new_by):
raise ValueError("No group keys passed!")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
@abstractmethod
def _build_groupby(
self: FrameLike, by: List[Union["Series", Tuple]], as_index: bool, dropna: bool
) -> "GroupBy[FrameLike]":
pass
def bool(self) -> bool:
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raise a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Returns
--------
bool
Examples
--------
>>> ps.DataFrame({'a': [True]}).bool()
True
>>> ps.Series([False]).bool()
False
If non-boolean or multiple values exist, it raises an exception in all
cases as below.
>>> ps.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ps.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ps.DataFrame):
df = self
elif isinstance(self, ps.Series):
df = self.to_dataframe()
else:
raise TypeError("bool() expects DataFrame or Series; however, " "got [%s]" % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Retrieves the index of the first valid value.
Returns
-------
scalar, tuple, or None
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> psdf.first_valid_index()
'W'
Support for Series.
>>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
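# A row counts as 'valid' only when all of its data columns are non-null.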
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# Disable Arrow to keep row ordering.
first_valid_row = cast(
pd.DataFrame,
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.limit(1)
.toPandas(),
)
# For Empty Series or DataFrame, returns None.
if len(first_valid_row) == 0:
return None
first_valid_row = first_valid_row.iloc[0]
if len(first_valid_row) == 1:
return first_valid_row.iloc[0]
else:
return tuple(first_valid_row)
def last_valid_index(self) -> Optional[Union[Scalar, Tuple[Scalar, ...]]]:
"""
Return index for last non-NA/null value.
Returns
-------
scalar, tuple, or None
Notes
-----
This API only works with PySpark >= 3.0.
Examples
--------
Support for DataFrame
>>> psdf = ps.DataFrame({'a': [1, 2, 3, None],
... 'b': [1.0, 2.0, 3.0, None],
... 'c': [100, 200, 400, None]},
... index=['Q', 'W', 'E', 'R'])
>>> psdf
a b c
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for MultiIndex columns
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
Q 1.0 1.0 100.0
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R NaN NaN NaN
>>> psdf.last_valid_index() # doctest: +SKIP
'E'
Support for Series.
>>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])
>>> s
100 1.0
200 2.0
300 3.0
400 NaN
500 NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)
>>> s
lama speed 250.0
weight 1.5
length 320.0
cow speed 1.0
weight 0.3
length NaN
falcon speed NaN
weight NaN
length NaN
dtype: float64
>>> s.last_valid_index() # doctest: +SKIP
('cow', 'weight')
"""
data_spark_columns = self._internal.data_spark_columns
if len(data_spark_columns) == 0:
return None
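# Same validity condition as first_valid_index; tail(1) (Spark >= 3.0) picks the last matching row.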
cond = reduce(lambda x, y: x & y, map(lambda x: x.isNotNull(), data_spark_columns))
last_valid_rows = (
self._internal.spark_frame.filter(cond)
.select(self._internal.index_spark_columns)
.tail(1)
)
# For Empty Series or DataFrame, returns None.
if len(last_valid_rows) == 0:
return None
last_valid_row = last_valid_rows[0]
if len(last_valid_row) == 1:
return last_valid_row[0]
else:
return tuple(last_valid_row)
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(
self: FrameLike, window: int, min_periods: Optional[int] = None
) -> "Rolling[FrameLike]":
"""
Provide rolling transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Rolling
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/pyspark.pandas/pull/607
def expanding(self: FrameLike, min_periods: int = 1) -> "Expanding[FrameLike]":
"""
Provide expanding transformations.
.. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
from pyspark.pandas.window import Expanding
return Expanding(self, min_periods=min_periods)
def get(self, key: Any, default: Optional[Any] = None) -> Any:
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def squeeze(self, axis: Optional[Axis] = None) -> Union[Scalar, "DataFrame", "Series"]:
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = ps.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_1a = df.loc[[1], ['a']]
>>> df_1a
a
1 3
Squeezing the rows produces a single scalar Series:
>>> df_1a.squeeze('rows')
a 3
Name: 1, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_1a.squeeze()
3
"""
if axis is not None:
axis = "index" if axis == "rows" else axis
axis = validate_axis(axis)
if isinstance(self, ps.DataFrame):
from pyspark.pandas.series import first_series
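# Peek at no more than two columns/rows so squeezability can be decided without a full count.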
is_squeezable = len(self.columns[:2]) == 1
# If DataFrame has multiple columns, there is no change.
if not is_squeezable:
return self
series_from_column = first_series(self)
has_single_value = len(series_from_column.head(2)) == 1
# If DataFrame has only a single value, use pandas API directly.
if has_single_value:
result = self._to_internal_pandas().squeeze(axis)
return ps.Series(result) if isinstance(result, pd.Series) else result
elif axis == 0:
return self
else:
return series_from_column
else:
# The case of Series is simple.
# If Series has only a single value, just return it as a scalar.
# Otherwise, there is no change.
self_top_two = cast("Series", self).head(2)
has_single_value = len(self_top_two) == 1
return cast(Union[Scalar, ps.Series], self_top_two[0] if has_single_value else self)
def truncate(
self,
before: Optional[Any] = None,
after: Optional[Any] = None,
axis: Optional[Axis] = None,
copy: bool_type = True,
) -> DataFrameOrSeries:
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
.. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`
which can be expensive.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Examples
--------
>>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
A Series with a sorted integer index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=[1, 2, 3, 4, 5, 6, 7])
>>> s
1 10
2 20
3 30
4 40
5 50
6 60
7 70
dtype: int64
>>> s.truncate(2, 5)
2 20
3 30
4 40
5 50
dtype: int64
A Series with a sorted string index.
>>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],
... index=['a', 'b', 'c', 'd', 'e', 'f', 'g'])
>>> s
a 10
b 20
c 30
d 40
e 50
f 60
g 70
dtype: int64
>>> s.truncate('b', 'e')
b 20
c 30
d 40
e 50
dtype: int64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
indexes = self.index
indexes_increasing = indexes.is_monotonic_increasing
if not indexes_increasing and not indexes.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if (before is None) and (after is None):
return cast(Union[ps.DataFrame, ps.Series], self.copy() if copy else self)
if (before is not None and after is not None) and before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
if isinstance(self, ps.Series):
if indexes_increasing:
result = first_series(self.to_frame().loc[before:after]).rename(self.name)
else:
result = first_series(self.to_frame().loc[after:before]).rename(self.name)
elif isinstance(self, ps.DataFrame):
if axis == 0:
if indexes_increasing:
result = self.loc[before:after]
else:
result = self.loc[after:before]
elif axis == 1:
result = self.loc[:, before:after]
return cast(DataFrameOrSeries, result.copy() if copy else result)
def to_markdown(
self, buf: Optional[Union[IO[str], str]] = None, mode: Optional[str] = None
) -> str:
"""
Print Series or DataFrame in Markdown-friendly format.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
mode : str, optional
Mode in which file is opened.
**kwargs
These parameters will be passed to `tabulate`.
Returns
-------
str
Series or DataFrame in Markdown-friendly format.
Notes
-----
Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.
Examples
--------
>>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")
>>> print(psser.to_markdown()) # doctest: +SKIP
| | animal |
|---:|:---------|
| 0 | elk |
| 1 | pig |
| 2 | dog |
| 3 | quetzal |
>>> psdf = ps.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(psdf.to_markdown()) # doctest: +SKIP
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
# `to_markdown` requires pandas >= 1.0.0, where it was first added.
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
raise NotImplementedError(
"`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0"
)
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psser_or_psdf = self
internal_pandas = psser_or_psdf._to_internal_pandas()
return validate_arguments_and_invoke_function(
internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args
)
@abstractmethod
def fillna(
self: FrameLike,
value: Optional[Any] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
pass
# TODO: add 'downcast' when value parameter exists
def bfill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying a partition specification. This moves all the data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> psdf.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
For Series
>>> psser = ps.Series([None, None, None, 1])
>>> psser
0 NaN
1 NaN
2 NaN
3 1.0
dtype: float64
>>> psser.bfill()
0 1.0
1 1.0
2 1.0
3 1.0
dtype: float64
"""
return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit)
backfill = bfill
# TODO: add 'downcast' when value parameter exists
def ffill(
self: FrameLike,
axis: Optional[Axis] = None,
inplace: bool_type = False,
limit: Optional[int] = None,
) -> FrameLike:
"""
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying a partition specification. This moves all the data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method against very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame or Series
DataFrame or Series with NA entries filled.
Examples
--------
>>> psdf = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> psdf
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> psdf.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
For Series
>>> psser = ps.Series([2, 4, None, 3])
>>> psser
0 2.0
1 4.0
2 NaN
3 3.0
dtype: float64
>>> psser.ffill()
0 2.0
1 4.0
2 4.0
3 3.0
dtype: float64
"""
return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit)
pad = ffill
@property
def at(self) -> AtIndexer:
return AtIndexer(self) # type: ignore
at.__doc__ = AtIndexer.__doc__
@property
def iat(self) -> iAtIndexer:
return iAtIndexer(self) # type: ignore
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self) -> iLocIndexer:
return iLocIndexer(self) # type: ignore
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self) -> LocIndexer:
return LocIndexer(self) # type: ignore
loc.__doc__ = LocIndexer.__doc__
def __bool__(self) -> NoReturn:
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(self.__class__.__name__)
)
@staticmethod
def _count_expr(spark_column: Column, spark_type: DataType) -> Column:
        # Handle floating point types specially because Spark's count treats NaN as a
        # valid value, whereas pandas count doesn't include NaN.
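        # Illustrative example (not from the codebase): for a DoubleType column holding
        # [1.0, NaN, NULL], the pandas-style count should be 1, but a bare F.count would
        # return 2 because Spark only skips NULLs; nanvl(col, NULL) maps NaN to NULL so
        # both NaN and NULL are excluded from the count.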
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(spark_column, SF.lit(None)))
else:
return F.count(spark_column)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
from pyspark.sql import SparkSession
import pyspark.pandas.generic
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.generic.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.generic tests")
.getOrCreate()
)
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.generic,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
KasperPRasmussen/bokeh | bokeh/models/sources.py | 6 | 9916 | from __future__ import absolute_import
from ..core import validation
from ..core.validation.errors import COLUMN_LENGTHS
from ..core.properties import abstract
from ..core.properties import Any, Int, String, Instance, List, Dict, Bool, Enum, JSON
from ..model import Model
from ..util.dependencies import import_optional
from ..util.deprecate import deprecated
from ..util.serialization import transform_column_source_data
from .callbacks import Callback
pd = import_optional('pandas')
@abstract
class DataSource(Model):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
selected = Dict(String, Dict(String, Any), default={
'0d': {'glyph': None, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': []}
}, help="""
A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
- 0d: indicates whether a Line or Patch glyphs have been hit. Value is a
dict with the following keys:
            - flag (boolean): true if the glyph was hit, false otherwise
- indices (list): indices hit (if applicable)
        - 1d: indicates whether any other glyph (except [multi]line or
patches) was hit:
- indices (list): indices that were hit/selected
        - 2d: indicates whether a [multi]line or patches were hit:
- indices (list(list)): indices of the lines/patches that were
hit/selected
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single argument that
is a dict or pandas.DataFrame, that argument is used as the value for the
"data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
ColumnDataSource(df) # same as ColumnDataSource(data=df)
.. note::
        There is an implicit assumption that all the columns in
        a given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
    Mapping of column names to sequences of data. The data can be, e.g.,
Python lists or tuples, NumPy arrays, etc.
""")
column_names = List(String, help="""
    A list of names for all the columns in this DataSource.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
for name, data in raw_data.items():
self.add(data, name)
@staticmethod
def _data_from_df(df):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = df.index
new_data = {}
for colname in df:
new_data[colname] = df[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
@classmethod
@deprecated("Bokeh 0.9.3", "ColumnDataSource initializer")
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
import warnings
warnings.warn("Method deprecated in Bokeh 0.9.3")
return cls._data_from_df(data)
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
""" Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
                If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
"""
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
def _to_json_like(self, include_defaults):
attrs = super(ColumnDataSource, self)._to_json_like(include_defaults=include_defaults)
if 'data' in attrs:
attrs['data'] = transform_column_source_data(attrs['data'])
return attrs
def remove(self, name):
""" Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
"""
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
@deprecated("Bokeh 0.11.0", "bokeh.io.push_notebook")
def push_notebook(self):
""" Update a data source for a plot in a Jupyter notebook.
This function can be be used to update data in plot data sources
in the Jupyter notebook, without having to use the Bokeh server.
.. warning::
This function has been deprecated. Please use
``bokeh.io.push_notebook()`` which will push all changes
(not just data sources) to the last shown plot in a Jupyter
notebook.
Returns:
None
"""
from bokeh.io import push_notebook
push_notebook()
@validation.error(COLUMN_LENGTHS)
def _check_column_lengths(self):
lengths = set(len(x) for x in self.data.values())
if len(lengths) > 1:
return str(self)
def stream(self, new_data, rollover=None):
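        # Example usage (illustrative): append one new point to every existing column
        # and keep only the most recent 200 rows:
        #     source.stream({'x': [10], 'y': [3.2]}, rollover=200)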
import numpy as np
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError("Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra))))
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
lengths = set()
for x in new_data.values():
if isinstance(x, np.ndarray):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
self.data._stream(self.document, self, new_data, rollover)
class GeoJSONDataSource(ColumnDataSource):
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
only process a FeatureCollection or GeometryCollection.
""")
@abstract
class RemoteSource(ColumnDataSource):
data_url = String(help="""
The URL to the endpoint for the data.
""")
polling_interval = Int(help="""
    Polling interval (in milliseconds) for updating the data source.
""")
class AjaxDataSource(RemoteSource):
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
    Maximum size of the data array to keep after each pull request.
    Beyond that size, the data is right shifted (the oldest entries are dropped).
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
HTTP headers to set for the Ajax request.
""")
| bsd-3-clause |
xubenben/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
  data are Gaussian distributed and performs better than the One-Class SVM
  in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
bmcfee/librosa | tests/test_display.py | 2 | 25590 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# CREATED:2015-02-14 22:51:01 by Brian McFee <[email protected]>
"""Unit tests for display module"""
# Disable cache
import os
try:
os.environ.pop("LIBROSA_CACHE_DIR")
except KeyError:
pass
from packaging import version
import pytest
matplotlib = pytest.importorskip("matplotlib", minversion="3.4")
matplotlib.use("Agg")
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
import matplotlib.style
matplotlib.style.use("seaborn-ticks")
import matplotlib.pyplot as plt
import librosa
import librosa.display
import numpy as np
# Workaround for old freetype builds with our image fixtures
FT_VERSION = version.parse(matplotlib.ft2font.__freetype_version__)
OLD_FT = not (FT_VERSION >= version.parse("2.10"))
@pytest.fixture
def audio():
__EXAMPLE_FILE = os.path.join("tests", "data", "test1_22050.wav")
y, sr = librosa.load(__EXAMPLE_FILE)
return y, sr
@pytest.fixture
def y(audio):
return audio[0]
@pytest.fixture
def sr(audio):
return audio[1]
@pytest.fixture
def S(y):
return librosa.stft(y)
@pytest.fixture
def S_abs(S):
return np.abs(S)
@pytest.fixture
def C(y, sr):
return np.abs(librosa.cqt(y, sr=sr))
@pytest.fixture
def S_signed(S):
return np.abs(S) - np.median(np.abs(S))
@pytest.fixture
def S_bin(S_signed):
return S_signed > 0
@pytest.fixture
def rhythm(y, sr):
return librosa.beat.beat_track(y=y, sr=sr)
@pytest.fixture
def tempo(rhythm):
return rhythm[0]
@pytest.fixture
def beats(rhythm, C):
return librosa.util.fix_frames(rhythm[1], x_max=C.shape[1])
@pytest.fixture
def beat_t(beats, sr):
return librosa.frames_to_time(beats, sr)
@pytest.fixture
def Csync(C, beats):
return librosa.util.sync(C, beats, aggregate=np.median)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_unknown_time_unit(y):
times = np.arange(len(y))
plt.figure()
ax = plt.gca()
ax.plot(times, y)
ax.xaxis.set_major_formatter(
librosa.display.TimeFormatter(unit="neither s, nor ms, nor None")
)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["complex"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_complex_input(S):
plt.figure()
librosa.display.specshow(S)
return plt.gcf()
@pytest.mark.mpl_image_compare(baseline_images=["abs"], extensions=["png"], tolerance=6)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_abs_input(S_abs):
plt.figure()
librosa.display.specshow(S_abs)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["cqt_note"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_cqt_note(C):
plt.figure()
librosa.display.specshow(C, y_axis="cqt_note")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["fft_note"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_fft_note(S_abs):
plt.figure()
librosa.display.specshow(S_abs, y_axis="fft_note")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["cqt_hz"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_cqt_hz(C):
plt.figure()
librosa.display.specshow(C, y_axis="cqt_hz")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["tempo"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_tempo(y, sr):
T = librosa.feature.tempogram(y=y, sr=sr)
plt.figure()
librosa.display.specshow(T, y_axis="tempo", cmap="magma")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["fourier_tempo"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_fourier_tempo(y, sr):
T = librosa.feature.fourier_tempogram(y=y, sr=sr)
plt.figure()
librosa.display.specshow(np.abs(T), y_axis="fourier_tempo", cmap="magma")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["tonnetz"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_tonnetz(C):
plt.figure()
chroma = librosa.feature.chroma_cqt(C=C)
ton = librosa.feature.tonnetz(chroma=chroma)
librosa.display.specshow(ton, y_axis="tonnetz")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["chroma"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_chroma(S_abs, sr):
plt.figure()
plt.subplot(3, 1, 1)
chr1 = librosa.feature.chroma_stft(S=S_abs ** 2, sr=sr)
librosa.display.specshow(chr1, y_axis="chroma")
plt.subplot(3, 1, 2)
chr2 = librosa.feature.chroma_stft(S=S_abs ** 2, sr=sr, n_chroma=2 * 12)
librosa.display.specshow(chr2, y_axis="chroma", bins_per_octave=2 * 12)
plt.subplot(3, 1, 3)
chr3 = librosa.feature.chroma_stft(S=S_abs ** 2, sr=sr, n_chroma=3 * 12)
librosa.display.specshow(chr3, y_axis="chroma", bins_per_octave=3 * 12)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["chroma_svara"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_chroma_svara(C, sr):
chroma = librosa.feature.chroma_cqt(C=C, sr=sr, threshold=0.9)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, sharex=True, figsize=(10, 10))
# Hindustani, no thaat
librosa.display.specshow(chroma, y_axis="chroma_h", Sa=5, ax=ax1)
# Hindustani, kafi thaat
librosa.display.specshow(chroma, y_axis="chroma_h", Sa=5, ax=ax2, thaat="kafi")
# Carnatic, mela 22
librosa.display.specshow(chroma, y_axis="chroma_c", Sa=5, ax=ax3, mela=22)
# Carnatic, mela 1
librosa.display.specshow(chroma, y_axis="chroma_c", Sa=7, ax=ax4, mela=1)
ax1.label_outer()
ax2.label_outer()
ax3.label_outer()
return fig
@pytest.mark.mpl_image_compare(
baseline_images=["double_chroma"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_double_chroma(S_abs, sr):
plt.figure()
chr1 = librosa.feature.chroma_stft(S=S_abs ** 2, sr=sr)
chr1 = np.vstack((chr1, chr1))
librosa.display.specshow(chr1, y_axis="chroma", bins_per_octave=12)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_mel"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_x_mel(S_abs):
plt.figure()
M = librosa.feature.melspectrogram(S=S_abs ** 2)
librosa.display.specshow(M.T, x_axis="mel")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["y_mel"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_y_mel(S_abs):
plt.figure()
M = librosa.feature.melspectrogram(S=S_abs ** 2)
librosa.display.specshow(M, y_axis="mel")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["y_mel_bounded"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_y_mel_bounded(S_abs):
plt.figure()
fmin, fmax = 110, 880
M = librosa.feature.melspectrogram(S=S_abs ** 2, fmin=fmin, fmax=fmax)
librosa.display.specshow(M, y_axis="mel", fmin=fmin, fmax=fmax)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_none_y_linear"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_none_yaxis_linear(S_abs, S_signed, S_bin):
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs, y_axis="linear")
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed, y_axis="fft")
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin, y_axis="hz")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["specshow_ext_axes"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_specshow_ext_axes(S_abs):
plt.figure()
ax_left = plt.subplot(1, 2, 1)
ax_right = plt.subplot(1, 2, 2)
# implicitly ax_right
librosa.display.specshow(S_abs, cmap="gray")
librosa.display.specshow(S_abs, cmap="magma", ax=ax_left)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_none_y_log"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_none_yaxis_log(S_abs, S_signed, S_bin):
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs, y_axis="log")
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed, y_axis="log")
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin, y_axis="log")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_linear_y_none"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_linear_yaxis_none(S_abs, S_signed, S_bin):
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs.T, x_axis="linear")
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed.T, x_axis="fft")
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin.T, x_axis="hz")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_log_y_none"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_log_yaxis_none(S_abs, S_signed, S_bin):
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.specshow(S_abs.T, x_axis="log")
plt.subplot(3, 1, 2)
librosa.display.specshow(S_signed.T, x_axis="log")
plt.subplot(3, 1, 3)
librosa.display.specshow(S_bin.T, x_axis="log")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_time_y_none"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_time_yaxis_none(S_abs):
plt.figure()
librosa.display.specshow(S_abs, x_axis="time")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_none_y_time"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_none_yaxis_time(S_abs):
plt.figure()
librosa.display.specshow(S_abs.T, y_axis="time")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_frames_y_none"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_frames_yaxis_none(S_abs):
plt.figure()
librosa.display.specshow(S_abs, x_axis="frames")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_none_y_frames"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_none_yaxis_frames(S_abs):
plt.figure()
librosa.display.specshow(S_abs.T, y_axis="frames")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_lag_y_none"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_lag_yaxis_none(S_abs):
plt.figure()
librosa.display.specshow(S_abs, x_axis="lag")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["x_none_y_lag"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_xaxis_time_yaxis_lag(S_abs):
plt.figure()
librosa.display.specshow(S_abs.T, y_axis="lag")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["time_scales_auto"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_time_scales_auto(S_abs, sr):
# sr = 22050, hop_length = 512, S.shape[1] = 198
# 197 * 512 / 22050 ~= 4.6s
plt.figure(figsize=(10, 10))
plt.subplot(4, 1, 1)
# sr * 10 -> ms
librosa.display.specshow(S_abs, sr=10 * sr, x_axis="time")
plt.subplot(4, 1, 2)
# sr -> s
librosa.display.specshow(S_abs, sr=sr, x_axis="time")
plt.subplot(4, 1, 3)
# sr / 20 -> m
librosa.display.specshow(S_abs, sr=sr // 20, x_axis="time")
plt.subplot(4, 1, 4)
# sr / (60 * 20) -> h
librosa.display.specshow(S_abs, sr=sr // (60 * 20), x_axis="time")
plt.tight_layout()
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["time_unit"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_time_unit(S_abs, sr):
# sr = 22050, hop_length = 512, S.shape[1] = 198
# 197 * 512 / 22050 ~= 4.6s
plt.figure(figsize=(9, 10))
plt.subplot(3, 1, 1)
# time scale auto
librosa.display.specshow(S_abs, sr=sr, x_axis="time")
plt.subplot(3, 1, 2)
# time unit fixed to 's'
librosa.display.specshow(S_abs, sr=sr, x_axis="s")
plt.subplot(3, 1, 3)
# time unit fixed to 'ms'
librosa.display.specshow(S_abs, sr=sr, x_axis="ms")
plt.tight_layout()
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["time_unit_lag"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_time_unit_lag(S_abs, sr):
plt.figure(figsize=(9, 10))
plt.subplot(3, 1, 1)
# time scale auto in lag mode
librosa.display.specshow(S_abs, sr=sr, x_axis="lag")
plt.subplot(3, 1, 2)
# time unit fixed to 's' in lag mode
librosa.display.specshow(S_abs, sr=sr, x_axis="lag_s")
plt.subplot(3, 1, 3)
# time unit fixed to 'ms' in lag mode
librosa.display.specshow(S_abs, sr=sr, x_axis="lag_ms")
plt.tight_layout()
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["waveplot_mono"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveplot_mono(y, sr):
plt.figure()
plt.subplot(3, 1, 1)
librosa.display.waveplot(y, sr=sr, max_points=None, x_axis="off")
plt.subplot(3, 1, 2)
librosa.display.waveplot(y, sr=sr, x_axis="off")
plt.subplot(3, 1, 3)
librosa.display.waveplot(y, sr=sr, x_axis="time")
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["waveshow_mono"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveshow_mono(y, sr):
fig, ax = plt.subplots(nrows=1)
librosa.display.waveshow(y, sr=sr, ax=ax)
return fig
@pytest.mark.mpl_image_compare(
baseline_images=["waveshow_mono_zoom"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveshow_mono_zoom(y, sr):
fig, ax = plt.subplots(nrows=1)
out = librosa.display.waveshow(y, sr=sr, ax=ax, max_points=sr // 2)
# Zoom into 1/8 of a second, make sure it's out of the initial viewport
ax.set(xlim=[1, 1.125])
return fig
@pytest.mark.mpl_image_compare(
baseline_images=["waveshow_mono_zoom_out"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveshow_mono_zoom_out(y, sr):
fig, ax = plt.subplots(nrows=1)
out = librosa.display.waveshow(y, sr=sr, ax=ax, max_points=sr // 2)
# Zoom into 1/8 of a second, make sure it's out of the initial viewport
ax.set(xlim=[1, 1.125])
# Zoom back out to get an envelope view again
ax.set(xlim=[0, 1])
return fig
@pytest.mark.mpl_image_compare(
baseline_images=["waveplot_ext_axes"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveplot_ext_axes(y):
plt.figure()
ax_left = plt.subplot(1, 2, 1)
ax_right = plt.subplot(1, 2, 2)
# implicitly ax_right
librosa.display.waveplot(y, color="blue")
librosa.display.waveplot(y, color="red", ax=ax_left)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["waveshow_ext_axes"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveshow_ext_axes(y):
plt.figure()
ax_left = plt.subplot(1, 2, 1)
ax_right = plt.subplot(1, 2, 2)
# implicitly ax_right
librosa.display.waveshow(y, color="blue")
librosa.display.waveshow(y, color="red", ax=ax_left)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["waveplot_stereo"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveplot_stereo(y, sr):
ys = librosa.util.stack([y, 2 * y])
plt.figure()
librosa.display.waveplot(ys, sr=sr)
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["waveshow_stereo"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_waveshow_stereo(y, sr):
ys = librosa.util.stack([y, 2 * y])
plt.figure()
librosa.display.waveshow(ys, sr=sr)
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_unknown_wavaxis(y, sr):
plt.figure()
librosa.display.waveshow(y, sr=sr, x_axis="something not in the axis map")
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_waveshow_unknown_wavaxis(y, sr):
plt.figure()
librosa.display.waveplot(y, sr=sr, x_axis="something not in the axis map")
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_waveplot_bad_maxsr(y, sr):
plt.figure()
librosa.display.waveplot(y, sr=sr, max_sr=0)
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_waveshow_bad_maxpoints(y, sr):
plt.figure()
librosa.display.waveshow(y, sr=sr, max_points=0)
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_waveplot_bad_maxpoints(y, sr):
plt.figure()
librosa.display.waveplot(y, sr=sr, max_points=0)
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", ["x_axis", "y_axis"])
def test_unknown_axis(S_abs, axis):
kwargs = dict()
kwargs.setdefault(axis, "something not in the axis map")
plt.figure()
librosa.display.specshow(S_abs, **kwargs)
@pytest.mark.parametrize(
"data",
[
np.arange(1, 10.0), # strictly positive
-np.arange(1, 10.0), # strictly negative
np.arange(-3, 4.0), # signed,
        np.arange(2, dtype=bool),
],
) # binary
def test_cmap_robust(data):
cmap1 = librosa.display.cmap(data, robust=False)
cmap2 = librosa.display.cmap(data, robust=True)
assert type(cmap1) is type(cmap2)
if isinstance(cmap1, matplotlib.colors.ListedColormap):
assert np.allclose(cmap1.colors, cmap2.colors)
elif isinstance(cmap1, matplotlib.colors.LinearSegmentedColormap):
assert cmap1.name == cmap2.name
else:
assert cmap1 == cmap2
@pytest.mark.mpl_image_compare(
baseline_images=["coords"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_coords(Csync, beat_t):
plt.figure()
librosa.display.specshow(Csync, x_coords=beat_t, x_axis="time", y_axis="cqt_note")
return plt.gcf()
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_bad_coords(S_abs):
librosa.display.specshow(S_abs, x_coords=np.arange(S_abs.shape[1] // 2))
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["sharex_specshow_ms"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_sharex_specshow_ms(S_abs, y, sr):
# Correct time range ~= 4.6 s or 4600ms
# Due to shared x_axis, both plots are plotted in 's'.
plt.figure(figsize=(8, 8))
ax = plt.subplot(2, 1, 1)
librosa.display.specshow(librosa.amplitude_to_db(S_abs, ref=np.max), x_axis="time")
plt.xlabel("") # hide the x label here, which is not propagated automatically
plt.subplot(2, 1, 2, sharex=ax)
librosa.display.waveplot(y, sr, x_axis="ms")
plt.xlabel("") # hide the x label here, which is not propagated automatically
return plt.gcf()
@pytest.mark.mpl_image_compare(
baseline_images=["sharex_waveplot_ms"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_sharex_waveplot_ms(y, sr, S_abs):
# Correct time range ~= 4.6 s or 4600ms
# Due to shared x_axis, both plots are plotted in 'ms'.
plt.figure(figsize=(8, 8))
ax = plt.subplot(2, 1, 1)
librosa.display.waveplot(y, sr)
plt.xlabel("") # hide the x label here, which is not propagated automatically
plt.subplot(2, 1, 2, sharex=ax)
librosa.display.specshow(librosa.amplitude_to_db(S_abs, ref=np.max), x_axis="ms")
plt.xlabel("") # hide the x label here, which is not propagated automatically
return plt.gcf()
@pytest.mark.parametrize("format_str", ["cqt_hz", "cqt_note"])
def test_axis_bound_warning(format_str):
with pytest.warns(UserWarning):
# set sr=22050
# fmin= 11025
# 72 bins
# 12 bins per octave
librosa.display.specshow(
np.zeros((72, 3)),
y_axis=format_str,
fmin=11025,
sr=22050,
bins_per_octave=12,
)
@pytest.mark.mpl_image_compare(
baseline_images=["cqt_svara"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_display_cqt_svara(C, sr):
Camp = librosa.amplitude_to_db(C, ref=np.max)
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(
nrows=5, sharex=True, figsize=(10, 10)
)
librosa.display.specshow(Camp, y_axis="cqt_svara", Sa=261, ax=ax1)
librosa.display.specshow(Camp, y_axis="cqt_svara", Sa=440, ax=ax2)
librosa.display.specshow(Camp, y_axis="cqt_svara", Sa=261, ax=ax3)
librosa.display.specshow(Camp, y_axis="cqt_svara", Sa=261, mela=1, ax=ax4)
librosa.display.specshow(Camp, y_axis="cqt_svara", Sa=261, mela=1, ax=ax5)
ax3.set_ylim([440, 880])
ax5.set_ylim([440, 880])
ax1.label_outer()
ax2.label_outer()
ax3.label_outer()
ax4.label_outer()
return fig
@pytest.mark.mpl_image_compare(
baseline_images=["fft_svara"], extensions=["png"], tolerance=6
)
@pytest.mark.xfail(OLD_FT, reason=f"freetype version < {FT_VERSION}", strict=False)
def test_display_fft_svara(S_abs, sr):
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(
nrows=5, sharex=True, figsize=(10, 10)
)
librosa.display.specshow(S_abs, y_axis="fft_svara", Sa=261, ax=ax1)
librosa.display.specshow(S_abs, y_axis="fft_svara", Sa=440, ax=ax2)
librosa.display.specshow(S_abs, y_axis="fft_svara", Sa=261, ax=ax3)
librosa.display.specshow(S_abs, y_axis="fft_svara", Sa=261, mela=1, ax=ax4)
librosa.display.specshow(S_abs, y_axis="fft_svara", Sa=261, mela=1, ax=ax5)
ax3.set_ylim([440, 880])
ax5.set_ylim([440, 880])
ax1.label_outer()
ax2.label_outer()
ax3.label_outer()
ax4.label_outer()
return fig
@pytest.mark.parametrize(
"x_axis,y_axis,xlim,ylim,out",
[
(None, None, (0.0, 1.0), (0.0, 1.0), False),
("time", "linear", (0.0, 1.0), (0.0, 1.0), False),
("time", "time", (0.0, 1.0), (0.0, 2.0), False),
("chroma", "chroma", (0.0, 1.0), (0.0, 1.0), True),
],
)
def test_same_axes(x_axis, y_axis, xlim, ylim, out):
assert librosa.display.__same_axes(x_axis, y_axis, xlim, ylim) == out
def test_auto_aspect():
fig, ax = plt.subplots(nrows=4)
# Ensure auto aspect by default
for axi in ax:
axi.set(aspect="auto")
X = np.zeros((12, 12))
# Different axes should retain auto scaling
librosa.display.specshow(X, x_axis="chroma", y_axis="time", ax=ax[0])
assert ax[0].get_aspect() == "auto"
# Same axes and auto_aspect=True should force equal scaling
librosa.display.specshow(X, x_axis="chroma", y_axis="chroma", ax=ax[1])
assert ax[1].get_aspect() == 1.0
# Same axes and auto_aspect=False should retain auto scaling
librosa.display.specshow(
X, x_axis="chroma", y_axis="chroma", auto_aspect=False, ax=ax[2]
)
assert ax[2].get_aspect() == "auto"
# Different extents with auto_aspect=True should retain auto scaling
librosa.display.specshow(
X[:2, :], x_axis="chroma", y_axis="chroma", auto_aspect=True, ax=ax[3]
)
assert ax[3].get_aspect() == "auto"
| isc |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/io/tests/test_hdf.py | 3 | 19565 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
import os
import dask
import pytest
from time import sleep
import dask.dataframe as dd
from dask.utils import tmpfile, tmpdir, dependency_depth
from dask.dataframe.utils import assert_eq
def test_to_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
with tmpfile('h5') as fn:
a.x.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_series_equal(df.x, out[:])
a = dd.from_pandas(df, 1)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
# test compute = False
with tmpfile('h5') as fn:
r = a.to_hdf(fn, '/data', compute=False)
r.compute()
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
def test_to_hdf_multiple_nodes():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16.])
b = dd.from_pandas(df16, 16)
# saving to multiple nodes
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data*')
out = dd.read_hdf(fn, '/data*')
assert_eq(df, out)
# saving to multiple nodes making sure order is kept
with tmpfile('h5') as fn:
b.to_hdf(fn, '/data*')
out = dd.read_hdf(fn, '/data*')
assert_eq(df16, out)
# saving to multiple datasets with custom name_function
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data_*', name_function=lambda i: 'a' * (i + 1))
out = dd.read_hdf(fn, '/data_*')
assert_eq(df, out)
out = pd.read_hdf(fn, '/data_a')
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(fn, '/data_aa')
tm.assert_frame_equal(out, df.iloc[2:])
# test multiple nodes with hdf object
with tmpfile('h5') as fn:
with pd.HDFStore(fn) as hdf:
b.to_hdf(hdf, '/data*')
out = dd.read_hdf(fn, '/data*')
assert_eq(df16, out)
def test_to_hdf_multiple_files():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16.])
b = dd.from_pandas(df16, 16)
# saving to multiple files
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data')
out = dd.read_hdf(fn, '/data')
assert_eq(df, out)
# saving to multiple files making sure order is kept
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
b.to_hdf(fn, '/data')
out = dd.read_hdf(fn, '/data')
assert_eq(df16, out)
# saving to multiple files with custom name_function
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data', name_function=lambda i: 'a' * (i + 1))
out = dd.read_hdf(fn, '/data')
assert_eq(df, out)
out = pd.read_hdf(os.path.join(dn, 'data_a.h5'), '/data')
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(os.path.join(dn, 'data_aa.h5'), '/data')
tm.assert_frame_equal(out, df.iloc[2:])
# test hdf object
with tmpfile('h5') as fn:
with pd.HDFStore(fn) as hdf:
a.to_hdf(hdf, '/data*')
out = dd.read_hdf(fn, '/data*')
assert_eq(df, out)
def test_to_hdf_modes_multiple_nodes():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
# appending a single partition to existing data
a = dd.from_pandas(df, 1)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data2')
a.to_hdf(fn, '/data*', mode='a')
out = dd.read_hdf(fn, '/data*')
assert_eq(df.append(df), out)
# overwriting a file with a single partition
a = dd.from_pandas(df, 1)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data2')
a.to_hdf(fn, '/data*', mode='w')
out = dd.read_hdf(fn, '/data*')
assert_eq(df, out)
# appending two partitions to existing data
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data2')
a.to_hdf(fn, '/data*', mode='a')
out = dd.read_hdf(fn, '/data*')
assert_eq(df.append(df), out)
# overwriting a file with two partitions
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data2')
a.to_hdf(fn, '/data*', mode='w')
out = dd.read_hdf(fn, '/data*')
assert_eq(df, out)
# overwriting a single partition, keeping other partitions
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data1')
a.to_hdf(fn, '/data2')
a.to_hdf(fn, '/data*', mode='a', append=False)
out = dd.read_hdf(fn, '/data*')
assert_eq(df.append(df), out)
def test_to_hdf_modes_multiple_files():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
# appending a single partition to existing data
a = dd.from_pandas(df, 1)
with tmpdir() as dn:
fn = os.path.join(dn, 'data*')
a.to_hdf(os.path.join(dn, 'data2'), '/data')
a.to_hdf(fn, '/data', mode='a')
out = dd.read_hdf(fn, '/data*')
assert_eq(df.append(df), out)
# appending two partitions to existing data
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data*')
a.to_hdf(os.path.join(dn, 'data2'), '/data')
a.to_hdf(fn, '/data', mode='a')
out = dd.read_hdf(fn, '/data')
assert_eq(df.append(df), out)
# overwriting a file with two partitions
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data*')
a.to_hdf(os.path.join(dn, 'data1'), '/data')
a.to_hdf(fn, '/data', mode='w')
out = dd.read_hdf(fn, '/data')
assert_eq(df, out)
# overwriting a single partition, keeping other partitions
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data*')
a.to_hdf(os.path.join(dn, 'data1'), '/data')
a.to_hdf(fn, '/data', mode='a', append=False)
out = dd.read_hdf(fn, '/data')
assert_eq(df.append(df), out)
def test_to_hdf_link_optimizations():
"""testing dask link levels is correct by calculating the depth of the dask graph"""
pytest.importorskip('tables')
df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16.])
a = dd.from_pandas(df16, 16)
# saving to multiple hdf files, no links are needed
# expected layers: from_pandas, to_hdf, list = depth of 3
with tmpdir() as dn:
fn = os.path.join(dn, 'data*')
d = a.to_hdf(fn, '/data', compute=False)
assert dependency_depth(d.dask) == 3
# saving to a single hdf file with multiple nodes
# all subsequent nodes depend on the first
# expected layers: from_pandas, first to_hdf(creates file+node), subsequent to_hdfs, list = 4
with tmpfile() as fn:
d = a.to_hdf(fn, '/data*', compute=False)
assert dependency_depth(d.dask) == 4
# saving to a single hdf file with a single node
# every node depends on the previous node
# expected layers: from_pandas, to_hdf times npartitions(15), list = 2 + npartitions = 17
with tmpfile() as fn:
d = a.to_hdf(fn, '/data', compute=False)
assert dependency_depth(d.dask) == 2 + a.npartitions
@pytest.mark.slow
def test_to_hdf_lock_delays():
pytest.importorskip('tables')
df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.,
12., 13., 14., 15., 16.])
a = dd.from_pandas(df16, 16)
    # Add artificial delays so that earlier partitions finish last (i.e. the last
    # tasks complete first), to check that writes still land in the right order.
def delayed_nop(i):
if i[1] < 10:
sleep(0.1 * (10 - i[1]))
return i
# saving to multiple hdf nodes
with tmpfile() as fn:
a = a.apply(delayed_nop, axis=1, meta=a)
a.to_hdf(fn, '/data*')
out = dd.read_hdf(fn, '/data*')
assert_eq(df16, out)
# saving to multiple hdf files
    # adding artificial delays to make sure last tasks finish first
with tmpdir() as dn:
fn = os.path.join(dn, 'data*')
a = a.apply(delayed_nop, axis=1, meta=a)
a.to_hdf(fn, '/data')
out = dd.read_hdf(fn, '/data')
assert_eq(df16, out)
def test_to_hdf_exceptions():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 1)
# triggering too many asterisks error
with tmpdir() as dn:
with pytest.raises(ValueError):
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data_*')
# triggering too many asterisks error
with tmpfile() as fn:
with pd.HDFStore(fn) as hdf:
with pytest.raises(ValueError):
a.to_hdf(hdf, '/data_*_*')
@pytest.mark.parametrize('scheduler', ['sync', 'threads', 'processes'])
@pytest.mark.parametrize('npartitions', [1, 4, 10])
def test_to_hdf_schedulers(scheduler, npartitions):
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.])
a = dd.from_pandas(df, npartitions=npartitions)
# test single file single node
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data', scheduler=scheduler)
out = pd.read_hdf(fn, '/data')
assert_eq(df, out)
# test multiple files single node
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data', scheduler=scheduler)
out = dd.read_hdf(fn, '/data')
assert_eq(df, out)
# test single file multiple nodes
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data*', scheduler=scheduler)
out = dd.read_hdf(fn, '/data*')
assert_eq(df, out)
def test_to_hdf_kwargs():
pytest.importorskip('tables')
df = pd.DataFrame({'A': ['a', 'aaaa']})
ddf = dd.from_pandas(df, npartitions=2)
with tmpfile('h5') as fn:
ddf.to_hdf(fn, 'foo4', format='table', min_itemsize=4)
df2 = pd.read_hdf(fn, 'foo4')
tm.assert_frame_equal(df, df2)
# test shorthand 't' for table
with tmpfile('h5') as fn:
ddf.to_hdf(fn, 'foo4', format='t', min_itemsize=4)
df2 = pd.read_hdf(fn, 'foo4')
tm.assert_frame_equal(df, df2)
def test_to_fmt_warns():
pytest.importorskip('tables')
df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16.])
a = dd.from_pandas(df16, 16)
# testing warning when breaking order
with tmpfile('h5') as fn:
with pytest.warns(None):
a.to_hdf(fn, '/data*', name_function=str)
# testing warning when breaking order
with tmpdir() as dn:
with pytest.warns(None):
fn = os.path.join(dn, "data_*.csv")
a.to_csv(fn, name_function=str)
@pytest.mark.parametrize('data, compare', [
(pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.]),
tm.assert_frame_equal),
(pd.Series([1, 2, 3, 4], name='a'),
tm.assert_series_equal),
])
def test_read_hdf(data, compare):
pytest.importorskip('tables')
with tmpfile('h5') as fn:
data.to_hdf(fn, '/data')
try:
dd.read_hdf(fn, 'data', chunksize=2, mode='r')
assert False
except TypeError as e:
assert "format='table'" in str(e)
with tmpfile('h5') as fn:
data.to_hdf(fn, '/data', format='table')
a = dd.read_hdf(fn, '/data', chunksize=2, mode='r')
assert a.npartitions == 2
compare(a.compute(), data)
compare(dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3,
mode='r').compute(),
pd.read_hdf(fn, '/data', start=1, stop=3))
assert (sorted(dd.read_hdf(fn, '/data', mode='r').dask) ==
sorted(dd.read_hdf(fn, '/data', mode='r').dask))
with tmpfile('h5') as fn:
sorted_data = data.sort_index()
sorted_data.to_hdf(fn, '/data', format='table')
a = dd.read_hdf(fn, '/data', chunksize=2, sorted_index=True, mode='r')
assert a.npartitions == 2
compare(a.compute(), sorted_data)
def test_read_hdf_multiply_open():
"""Test that we can read from a file that's already opened elsewhere in
read-only mode."""
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data', format='table')
with pd.HDFStore(fn, mode='r'):
dd.read_hdf(fn, '/data', chunksize=2, mode='r')
def test_read_hdf_multiple():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16]},
index=[1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16.])
a = dd.from_pandas(df, 16)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data*')
r = dd.read_hdf(fn, '/data*', sorted_index=True)
assert a.npartitions == r.npartitions
assert a.divisions == r.divisions
assert_eq(a, r)
def test_read_hdf_start_stop_values():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data', format='table')
with pytest.raises(ValueError) as e:
dd.read_hdf(fn, '/data', stop=10)
assert 'number of rows' in str(e)
with pytest.raises(ValueError) as e:
dd.read_hdf(fn, '/data', start=10)
assert 'is above or equal to' in str(e)
with pytest.raises(ValueError) as e:
dd.read_hdf(fn, '/data', chunksize=-1)
assert 'positive integer' in str(e)
def test_hdf_globbing():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpdir() as tdir:
df.to_hdf(os.path.join(tdir, 'one.h5'), '/foo/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/bar/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/foo/data', format='table')
with dask.config.set(scheduler='sync'):
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2)
assert res.npartitions == 2
tm.assert_frame_equal(res.compute(), df)
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2, start=1, stop=3)
expected = pd.read_hdf(os.path.join(tdir, 'one.h5'), '/foo/data',
start=1, stop=3)
tm.assert_frame_equal(res.compute(), expected)
res = dd.read_hdf(os.path.join(tdir, 'two.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/foo/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))
def test_hdf_file_list():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpdir() as tdir:
df.iloc[:2].to_hdf(os.path.join(tdir, 'one.h5'), 'dataframe', format='table')
df.iloc[2:].to_hdf(os.path.join(tdir, 'two.h5'), 'dataframe', format='table')
with dask.config.set(scheduler='sync'):
input_files = [os.path.join(tdir, 'one.h5'), os.path.join(tdir, 'two.h5')]
res = dd.read_hdf(input_files, 'dataframe')
tm.assert_frame_equal(res.compute(), df)
def test_read_hdf_doesnt_segfault():
pytest.importorskip('tables')
with tmpfile('h5') as fn:
N = 40
df = pd.DataFrame(np.random.randn(N, 3))
with pd.HDFStore(fn, mode='w') as store:
store.append('/x', df)
ddf = dd.read_hdf(fn, '/x', chunksize=2)
assert len(ddf) == N
def test_hdf_filenames():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.to_hdf("foo*.hdf5", "key") == ["foo0.hdf5", "foo1.hdf5"]
os.remove("foo0.hdf5")
os.remove("foo1.hdf5")
| gpl-3.0 |
yunfeiz/py_learnt | quant/demo/tf_RNN.py | 1 | 5095 | # -*- coding: utf-8 -*-
# @DATE : 2017/2/14 17:50
# @Author :
# @File : stock_predict.py
import os
import sys
import datetime
import tensorflow as tf
import pandas as pd
import numpy as np
from yahoo_finance import Share
import matplotlib.pyplot as plt
#from utils import get_n_day_before, date_2_str
class StockRNN(object):
def __init__(self, seq_size=12, input_dims=1, hidden_layer_size=12, stock_id="BABA", days=365, log_dir="stock_model/"):
self.seq_size = seq_size
self.input_dims = input_dims
self.hidden_layer_size = hidden_layer_size
self.stock_id = stock_id
self.days = days
self.data = self._read_stock_data()["Adj_Close"].astype(float).values
self.log_dir = log_dir
def _read_stock_data(self):
stock = Share(self.stock_id)
end_date = "05-20-2017"#date_2_str(datetime.date.today())
start_date = "01-01-2017"#get_n_day_before(200)
# print(start_date, end_date)
his_data = stock.get_historical(start_date=start_date, end_date=end_date)
stock_pd = pd.DataFrame(his_data)
stock_pd["Adj_Close"] = stock_pd["Adj_Close"].astype(float)
stock_pd.sort_values(["Date"], inplace=True, ascending=True)
stock_pd.reset_index(inplace=True)
return stock_pd[["Date", "Adj_Close"]]
def _create_placeholders(self):
with tf.name_scope(name="data"):
self.X = tf.placeholder(tf.float32, [None, self.seq_size, self.input_dims], name="x_input")
self.Y = tf.placeholder(tf.float32, [None, self.seq_size], name="y_input")
def init_network(self, log_dir):
print("Init RNN network")
self.log_dir = log_dir
self.sess = tf.Session()
self.summary_op = tf.summary.merge_all()
self.saver = tf.train.Saver()
self.summary_writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(self.log_dir)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
print("Model restore")
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(self.sess, self.coord)
def _create_rnn(self):
W = tf.Variable(tf.random_normal([self.hidden_layer_size, 1], name="W"))
b = tf.Variable(tf.random_normal([1], name="b"))
with tf.variable_scope("cell_d"):
cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_layer_size)
with tf.variable_scope("rnn_d"):
outputs, states = tf.nn.dynamic_rnn(cell, self.X, dtype=tf.float32)
W_repeated = tf.tile(tf.expand_dims(W, 0), [tf.shape(self.X)[0], 1, 1])
out = tf.matmul(outputs, W_repeated) + b
out = tf.squeeze(out)
return out
def _data_prepare(self):
self.train_x = []
self.train_y = []
# data
data = np.log1p(self.data)
        for i in range(len(data) - self.seq_size - 1):
self.train_x.append(np.expand_dims(data[i: i + self.seq_size], axis=1).tolist())
self.train_y.append(data[i + 1: i + self.seq_size + 1].tolist())
def train_pred_rnn(self):
self._create_placeholders()
y_hat = self._create_rnn()
self._data_prepare()
loss = tf.reduce_mean(tf.square(y_hat - self.Y))
train_optim = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
feed_dict = {self.X: self.train_x, self.Y: self.train_y}
saver = tf.train.Saver(tf.global_variables())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
            for step in range(1, 20001):
_, loss_ = sess.run([train_optim, loss], feed_dict=feed_dict)
if step % 100 == 0:
print("{} {}".format(step, loss_))
saver.save(sess, self.log_dir + "model.ckpt")
# prediction
prev_seq = self.train_x[-1]
predict = []
for i in range(5):
next_seq = sess.run(y_hat, feed_dict={self.X: [prev_seq]})
predict.append(next_seq[-1])
prev_seq = np.vstack((prev_seq[1:], next_seq[-1]))
predict = np.exp(predict) - 1
print(predict)
self.pred = predict
def visualize(self):
pred = self.pred
plt.figure()
plt.legend(prop={'family': 'SimHei', 'size': 15})
plt.plot(list(range(len(self.data))), self.data, color='b')
plt.plot(list(range(len(self.data), len(self.data) + len(pred))), pred, color='r')
plt.title(u"{}股价预测".format(self.stock_id), fontproperties="SimHei")
plt.xlabel(u"日期", fontproperties="SimHei")
plt.ylabel(u"股价", fontproperties="SimHei")
plt.savefig("stock.png")
plt.show()
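# Illustrative sketch (not part of the original script): the sliding-window
# pairing built by StockRNN._data_prepare(). Each input window holds seq_size
# consecutive log-prices and the matching target window is the same series
# shifted one step ahead. The toy prices below are assumptions.
def _demo_sliding_windows(prices=(10.0, 11.0, 12.0, 13.0, 14.0), seq_size=3):
    data = np.log1p(np.asarray(prices))
    xs, ys = [], []
    for i in range(len(data) - seq_size - 1):
        xs.append(np.expand_dims(data[i: i + seq_size], axis=1))
        ys.append(data[i + 1: i + seq_size + 1])
    # shapes: (n_windows, seq_size, 1) for inputs, (n_windows, seq_size) for targets
    return np.asarray(xs), np.asarray(ys)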
if __name__ == "__main__":
stock = StockRNN()
# print(stock.read_stock_data())
log_dir = "stock_model"
stock.train_pred_rnn()
stock.visualize() | apache-2.0 |
shyamalschandra/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
SciTools/iris | docs/iris/gallery_code/general/plot_rotated_pole_mapping.py | 4 | 1690 | """
Rotated Pole Mapping
=====================
This example uses several visualisation methods to achieve an array of
differing images, including:
* Visualisation of point based data
* Contouring of point based data
* Block plot of contiguous bounded data
* Non native projection and a Natural Earth shaded relief image underlay
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load some test data.
fname = iris.sample_data_path("rotated_pole.nc")
air_pressure = iris.load_cube(fname)
# Plot #1: Point plot showing data values & a colorbar
plt.figure()
points = qplt.points(air_pressure, c=air_pressure.data)
cb = plt.colorbar(points, orientation="horizontal")
cb.set_label(air_pressure.units)
plt.gca().coastlines()
iplt.show()
# Plot #2: Contourf of the point based data
plt.figure()
qplt.contourf(air_pressure, 15)
plt.gca().coastlines()
iplt.show()
# Plot #3: Contourf overlayed by coloured point data
plt.figure()
qplt.contourf(air_pressure)
iplt.points(air_pressure, c=air_pressure.data)
plt.gca().coastlines()
iplt.show()
# For the purposes of this example, add some bounds to the latitude
# and longitude
air_pressure.coord("grid_latitude").guess_bounds()
air_pressure.coord("grid_longitude").guess_bounds()
# Plot #4: Block plot
plt.figure()
plt.axes(projection=ccrs.PlateCarree())
iplt.pcolormesh(air_pressure)
plt.gca().stock_img()
plt.gca().coastlines()
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
SoimulPatriei/TMCleaner | Translation.py | 1 | 6836 | #!/usr/bin/env python
"""Use a Machine Translation engine to translate the source segment and
compute features based on the similarity
between source and the translation of the source"""
__author__ = "Eduard Barbu"
__copyright__ = "Copyright 2016, The Expert Project"
import logging
import sys
import codecs
import collections
import math
import urllib2
import urllib
import re
import string
from collections import Counter
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import Parameters
class Translate:
"""Use a Machine Translation engine to translate the source segment
and compute features based on the similarity between source and the translation of the source."""
def __init__(self,fSegments,fTranslations,sLanguage,tLanguage ):
self.fSegments=fSegments
self.fTranslations=fTranslations
self.sLanguage=sLanguage
self.tLanguage=tLanguage
def removePunctuation (self,segment) :
"""It removes the punctuation from a segment.
In this way increases the probability of good similarity match."""
punctList=list(string.punctuation)
for punct in punctList :
segment=segment.replace(punct," ")
return segment
def getSegmentWordList (self,segment) :
"Gets the word list of segment for the purpose of computing the cosine similarity"
segment=re.sub("^\s+","",segment)
segment=re.sub("\s+$","",segment)
segmentList=re.split("\s+",segment)
segmentListPruned=[];
punctuationSigns=[".","?","!",":",";","-","(",")","[","]","'",'"',"/",",","+"]
for el in segmentList :
if not (el in punctuationSigns) :
segmentListPruned.append(el)
return segmentListPruned
def prepareVectors (self,sourceVect,destVect) :
"""Prepare the vectors to for cosine similarity function """
X=np.asarray(sourceVect).reshape(1,-1)
Y=np.asarray(destVect).reshape(1,-1)
return X,Y
def getVector (self,uniqueFeatureList, featureDict) :
"Get the vectors to be used in the computation of cosine similarity"
vector=[]
for feature in uniqueFeatureList :
if feature in featureDict :
vector.append(featureDict[feature])
else :
vector.append(0)
return vector
def getSegmentSimilarity (self,sSegment,tSegment) :
"""It gets the cosine similarity between the translation of the source segment and the target segment"""
for sourceSegmentTranslated,e in self.getBingTranslation (sSegment):
if e:
logging.error( "Got a translation exception %s" %e)
return 0,0
else :
sourceSegmentTranslated=unicode(sourceSegmentTranslated,"UTF-8")
sourceSegmentTranslatedLower=self.removePunctuation (sourceSegmentTranslated.lower())
sourceSegmentTranslatedPrunedList=self.getSegmentWordList (sourceSegmentTranslatedLower)
targetSegmentP=self.removePunctuation (tSegment.lower())
targetSegmentPrunedList=self.getSegmentWordList (targetSegmentP)
uniqueFeatureList=list(set(targetSegmentPrunedList+sourceSegmentTranslatedPrunedList))
sourceDict=Counter(sourceSegmentTranslatedPrunedList)
destDict=Counter(targetSegmentPrunedList)
sourceVect=self.getVector(uniqueFeatureList,sourceDict)
destVect=self.getVector(uniqueFeatureList,destDict)
X,Y=self.prepareVectors (sourceVect,destVect)
cosineSimilarity=cosine_similarity(X,Y)
return (sourceSegmentTranslated,cosineSimilarity[0][0])
def getTranslationSimilarity(self):
fi = codecs.open( self.fSegments, "r", "utf-8")
scoreTranslation={}
fo = codecs.open( self.fTranslations, "w", "utf-8")
for line in fi:
line=line.rstrip()
components=line.split("@#@")
key=components[0]+"-"+components[1]
sSegment=components[2]
tSegment=components[3]
sourceSegmentTranslated,score=self.getSegmentSimilarity(sSegment,tSegment)
if sourceSegmentTranslated ==0 :
sourceSegmentTranslated="Got Exception"
fo.write(key+"@#@"+sourceSegmentTranslated+"@#@"+str(round(score,2))+"\n")
scoreTranslation[key]=round(score,2)
fi.close()
fo.close()
return scoreTranslation
def addFeatures (self,segmentFeaturesDict) :
"""Add features to the dictionary."""
scoreTranslation=self.getTranslationSimilarity()
for key in segmentFeaturesDict:
if key in scoreTranslation :
segmentFeaturesDict[key]["simscore"]=scoreTranslation[key]
else :
segmentFeaturesDict[key]["simscore"]=0
def getBingTranslation (self,sSegment) :
"Get the source Segment translation using BING API"
fBing="Parameters/p-Bing.txt"
pDict=Parameters.readParameters(fBing)
try :
sSegment=urllib.quote(sSegment.encode("utf-8"))
parameters="&from="+self.sLanguage+"&to="+self.tLanguage+"&text="+sSegment
url=pDict["bingAPI"]+pDict["key"]+parameters
answer=urllib2.urlopen(url).read()
pattern="""<string xmlns="http://schemas.microsoft.com/2003/10/Serialization/">(.+)</string>"""
m = re.search(pattern, answer)
tSegment="Error#"
if m :
tSegment=m.group(1)
except Exception as e:
yield (None ,e)
else :
yield(tSegment,None)
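# Illustrative sketch (not part of the original module): the bag-of-words
# cosine similarity that getSegmentSimilarity() computes between the machine
# translation and the target segment, shown on two toy English segments. The
# vocabulary is the union of both word lists and each segment becomes a count
# vector over it.
def _demo_bow_cosine(segA="the cat sat", segB="the cat ran"):
    wordsA, wordsB = segA.split(), segB.split()
    vocab = list(set(wordsA + wordsB))
    countsA, countsB = Counter(wordsA), Counter(wordsB)
    vecA = np.asarray([countsA.get(w, 0) for w in vocab]).reshape(1, -1)
    vecB = np.asarray([countsB.get(w, 0) for w in vocab]).reshape(1, -1)
    return cosine_similarity(vecA, vecB)[0][0]  # 2/3 for the toy segments above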
def testSet():
"""The Test set in a dict """
tDict= {"yeah, hurricane selina.":"- Gia'. L'uragano Selina",
"uh-oh, wait a minute.":"Oh, aspettate un attimo.",
"does that exist, hurricane selina?":"Ma esiste? L'uragano Selina?",
"- oh, i don't-- - can we run a check on that?":"- Oh, non... - Possiamo controllare?"}
return tDict
def getSegmentFeatures(fSegments):
"""Simulates a segment features dict """
segmentFeaturesDict={}
fi = codecs.open(fSegments, "r", "utf-8")
for line in fi:
line=line.rstrip()
components=line.split("@#@")
key=components[0]+"-"+components[1]
segmentFeaturesDict[key]={}
segmentFeaturesDict[key]["fakeFeature"]=0
fi.close()
return segmentFeaturesDict
def main():
sLanguage="en"
tLanguage="it"
fSegments="TestFiles/split-618.txt"
fTranslations="translations.txt"
print "Translate and Compute Similarity"
segmentFeaturesDict=getSegmentFeatures(fSegments)
tr=Translate(fSegments,fTranslations,sLanguage,tLanguage)
tr.addFeatures(segmentFeaturesDict)
print "Done"
if __name__ == '__main__':
main()
| lgpl-3.0 |
plotly/plotly.py | packages/python/plotly/_plotly_utils/colors/cmocean.py | 1 | 6360 | """
Color scales from the cmocean project
Learn more at https://matplotlib.org/cmocean/
cmocean is made available under an MIT license: https://github.com/matplotlib/cmocean/blob/master/LICENSE.txt
"""
from ._swatches import _swatches, _swatches_continuous
def swatches(template=None):
return _swatches(__name__, globals(), template)
swatches.__doc__ = _swatches.__doc__
def swatches_continuous(template=None):
return _swatches_continuous(__name__, globals(), template)
swatches_continuous.__doc__ = _swatches_continuous.__doc__
turbid = [
"rgb(232, 245, 171)",
"rgb(220, 219, 137)",
"rgb(209, 193, 107)",
"rgb(199, 168, 83)",
"rgb(186, 143, 66)",
"rgb(170, 121, 60)",
"rgb(151, 103, 58)",
"rgb(129, 87, 56)",
"rgb(104, 72, 53)",
"rgb(80, 59, 46)",
"rgb(57, 45, 37)",
"rgb(34, 30, 27)",
]
thermal = [
"rgb(3, 35, 51)",
"rgb(13, 48, 100)",
"rgb(53, 50, 155)",
"rgb(93, 62, 153)",
"rgb(126, 77, 143)",
"rgb(158, 89, 135)",
"rgb(193, 100, 121)",
"rgb(225, 113, 97)",
"rgb(246, 139, 69)",
"rgb(251, 173, 60)",
"rgb(246, 211, 70)",
"rgb(231, 250, 90)",
]
haline = [
"rgb(41, 24, 107)",
"rgb(42, 35, 160)",
"rgb(15, 71, 153)",
"rgb(18, 95, 142)",
"rgb(38, 116, 137)",
"rgb(53, 136, 136)",
"rgb(65, 157, 133)",
"rgb(81, 178, 124)",
"rgb(111, 198, 107)",
"rgb(160, 214, 91)",
"rgb(212, 225, 112)",
"rgb(253, 238, 153)",
]
solar = [
"rgb(51, 19, 23)",
"rgb(79, 28, 33)",
"rgb(108, 36, 36)",
"rgb(135, 47, 32)",
"rgb(157, 66, 25)",
"rgb(174, 88, 20)",
"rgb(188, 111, 19)",
"rgb(199, 137, 22)",
"rgb(209, 164, 32)",
"rgb(217, 192, 44)",
"rgb(222, 222, 59)",
"rgb(224, 253, 74)",
]
ice = [
"rgb(3, 5, 18)",
"rgb(25, 25, 51)",
"rgb(44, 42, 87)",
"rgb(58, 60, 125)",
"rgb(62, 83, 160)",
"rgb(62, 109, 178)",
"rgb(72, 134, 187)",
"rgb(89, 159, 196)",
"rgb(114, 184, 205)",
"rgb(149, 207, 216)",
"rgb(192, 229, 232)",
"rgb(234, 252, 253)",
]
gray = [
"rgb(0, 0, 0)",
"rgb(16, 16, 16)",
"rgb(38, 38, 38)",
"rgb(59, 59, 59)",
"rgb(81, 80, 80)",
"rgb(102, 101, 101)",
"rgb(124, 123, 122)",
"rgb(146, 146, 145)",
"rgb(171, 171, 170)",
"rgb(197, 197, 195)",
"rgb(224, 224, 223)",
"rgb(254, 254, 253)",
]
oxy = [
"rgb(63, 5, 5)",
"rgb(101, 6, 13)",
"rgb(138, 17, 9)",
"rgb(96, 95, 95)",
"rgb(119, 118, 118)",
"rgb(142, 141, 141)",
"rgb(166, 166, 165)",
"rgb(193, 192, 191)",
"rgb(222, 222, 220)",
"rgb(239, 248, 90)",
"rgb(230, 210, 41)",
"rgb(220, 174, 25)",
]
deep = [
"rgb(253, 253, 204)",
"rgb(206, 236, 179)",
"rgb(156, 219, 165)",
"rgb(111, 201, 163)",
"rgb(86, 177, 163)",
"rgb(76, 153, 160)",
"rgb(68, 130, 155)",
"rgb(62, 108, 150)",
"rgb(62, 82, 143)",
"rgb(64, 60, 115)",
"rgb(54, 43, 77)",
"rgb(39, 26, 44)",
]
dense = [
"rgb(230, 240, 240)",
"rgb(191, 221, 229)",
"rgb(156, 201, 226)",
"rgb(129, 180, 227)",
"rgb(115, 154, 228)",
"rgb(117, 127, 221)",
"rgb(120, 100, 202)",
"rgb(119, 74, 175)",
"rgb(113, 50, 141)",
"rgb(100, 31, 104)",
"rgb(80, 20, 66)",
"rgb(54, 14, 36)",
]
algae = [
"rgb(214, 249, 207)",
"rgb(186, 228, 174)",
"rgb(156, 209, 143)",
"rgb(124, 191, 115)",
"rgb(85, 174, 91)",
"rgb(37, 157, 81)",
"rgb(7, 138, 78)",
"rgb(13, 117, 71)",
"rgb(23, 95, 61)",
"rgb(25, 75, 49)",
"rgb(23, 55, 35)",
"rgb(17, 36, 20)",
]
matter = [
"rgb(253, 237, 176)",
"rgb(250, 205, 145)",
"rgb(246, 173, 119)",
"rgb(240, 142, 98)",
"rgb(231, 109, 84)",
"rgb(216, 80, 83)",
"rgb(195, 56, 90)",
"rgb(168, 40, 96)",
"rgb(138, 29, 99)",
"rgb(107, 24, 93)",
"rgb(76, 21, 80)",
"rgb(47, 15, 61)",
]
speed = [
"rgb(254, 252, 205)",
"rgb(239, 225, 156)",
"rgb(221, 201, 106)",
"rgb(194, 182, 59)",
"rgb(157, 167, 21)",
"rgb(116, 153, 5)",
"rgb(75, 138, 20)",
"rgb(35, 121, 36)",
"rgb(11, 100, 44)",
"rgb(18, 78, 43)",
"rgb(25, 56, 34)",
"rgb(23, 35, 18)",
]
amp = [
"rgb(241, 236, 236)",
"rgb(230, 209, 203)",
"rgb(221, 182, 170)",
"rgb(213, 156, 137)",
"rgb(205, 129, 103)",
"rgb(196, 102, 73)",
"rgb(186, 74, 47)",
"rgb(172, 44, 36)",
"rgb(149, 19, 39)",
"rgb(120, 14, 40)",
"rgb(89, 13, 31)",
"rgb(60, 9, 17)",
]
tempo = [
"rgb(254, 245, 244)",
"rgb(222, 224, 210)",
"rgb(189, 206, 181)",
"rgb(153, 189, 156)",
"rgb(110, 173, 138)",
"rgb(65, 157, 129)",
"rgb(25, 137, 125)",
"rgb(18, 116, 117)",
"rgb(25, 94, 106)",
"rgb(28, 72, 93)",
"rgb(25, 51, 80)",
"rgb(20, 29, 67)",
]
phase = [
"rgb(167, 119, 12)",
"rgb(197, 96, 51)",
"rgb(217, 67, 96)",
"rgb(221, 38, 163)",
"rgb(196, 59, 224)",
"rgb(153, 97, 244)",
"rgb(95, 127, 228)",
"rgb(40, 144, 183)",
"rgb(15, 151, 136)",
"rgb(39, 153, 79)",
"rgb(119, 141, 17)",
"rgb(167, 119, 12)",
]
balance = [
"rgb(23, 28, 66)",
"rgb(41, 58, 143)",
"rgb(11, 102, 189)",
"rgb(69, 144, 185)",
"rgb(142, 181, 194)",
"rgb(210, 216, 219)",
"rgb(230, 210, 204)",
"rgb(213, 157, 137)",
"rgb(196, 101, 72)",
"rgb(172, 43, 36)",
"rgb(120, 14, 40)",
"rgb(60, 9, 17)",
]
delta = [
"rgb(16, 31, 63)",
"rgb(38, 62, 144)",
"rgb(30, 110, 161)",
"rgb(60, 154, 171)",
"rgb(140, 193, 186)",
"rgb(217, 229, 218)",
"rgb(239, 226, 156)",
"rgb(195, 182, 59)",
"rgb(115, 152, 5)",
"rgb(34, 120, 36)",
"rgb(18, 78, 43)",
"rgb(23, 35, 18)",
]
curl = [
"rgb(20, 29, 67)",
"rgb(28, 72, 93)",
"rgb(18, 115, 117)",
"rgb(63, 156, 129)",
"rgb(153, 189, 156)",
"rgb(223, 225, 211)",
"rgb(241, 218, 206)",
"rgb(224, 160, 137)",
"rgb(203, 101, 99)",
"rgb(164, 54, 96)",
"rgb(111, 23, 91)",
"rgb(51, 13, 53)",
]
# Prefix variable names with _ so that they will not be added to the swatches
_contents = dict(globals())
for _k, _cols in _contents.items():
if _k.startswith("_") or _k.startswith("swatches") or _k.endswith("_r"):
continue
globals()[_k + "_r"] = _cols[::-1]
| mit |
TheCoSMoCompany/biopredyn | Prototype/scripts/parameter_estimation.py | 1 | 5307 | #!/usr/bin/env python
# coding=utf-8
from biopredyn import resources, workflow, result as res
from matplotlib import colors, pyplot as plt
import numpy as np
from scipy.stats import f
from scipy.linalg import svd
from scipy.stats import norm
from COPASI import CCopasiMethod
# required inputs
simulation_file = "generate_data.xml"
calibration_file = "calibration_data.txt"
validation_file = "validation_data.txt"
observables = ["sp_C"] # names of the observables
unknowns = ["k1", "k2", "k3"] # names of the parameters to be estimated
min_unknown_values = [0.0, 0.0, 0.0] # lower bound of the parameter value ranges
max_unknown_values = [10.0, 10.0, 10.0] # upper bound of the parameter value ranges
algo = CCopasiMethod.LevenbergMarquardt
rm = resources.ResourceManager()
wf = workflow.WorkFlow(rm, source=simulation_file)
sim = wf.get_simulations()[0]
model_result = sim.run_as_parameter_estimation(
wf.get_models()[0], calibration_file, validation_file,
observables, unknowns, min_unknown_values, max_unknown_values, algo, rm)
# plotting model and data results
plt.figure("Fitted model: " + wf.get_models()[0].get_id())
for s in model_result.get_fitted_result().get_result().keys():
if not str.lower(s).__contains__("time"):
results = model_result.get_fitted_result().get_quantities_per_species(s)
plt.plot(model_result.get_fitted_result().get_time_steps(),
results, label=s)
# plot data only if it is available
if s in observables:
dat = model_result.get_validation_data().get_species_as_mean_std(s)
data_label = str(s) + "_experimental"
plt.errorbar(model_result.get_validation_data().get_time_steps(),
dat[:,0], yerr=dat[:,1], ls='None', marker='+', label=data_label)
plt.legend(loc='center right')
print("====================================================================")
print("Fisher Information Matrix")
print(model_result.get_fisher_information_matrix())
print("====================================================================")
print("Covariance matrix")
print(model_result.get_covariance_matrix())
print("====================================================================")
print("Correlation matrix")
print(model_result.get_correlation_matrix())
scale = colors.Normalize(vmin=-1, vmax=1)
cor_plot = plt.matshow(model_result.get_correlation_matrix(),
fignum="Correlation matrix", norm=scale)
plt.xticks(np.arange(len(unknowns)), unknowns)
plt.yticks(np.arange(len(unknowns)), unknowns)
plt.colorbar(cor_plot)
print("====================================================================")
print("Objective function value: " + str(model_result.get_objective_value()))
print("====================================================================")
print("Delta dependent confidence intervals (alpha = 0.05)")
print(model_result.get_dependent_confidence_intervals())
print("====================================================================")
print("Delta independent confidence intervals (alpha = 0.05)")
print(model_result.get_independent_confidence_intervals())
print("====================================================================")
print("Eigenvectors")
print(model_result.get_fim_eigenvectors())
print("====================================================================")
print("Singular values")
print(model_result.get_fim_singular_values())
val_data = res.TimeSeries()
val_data.import_from_csv_file(validation_file, rm)
residuals = model_result.get_residuals()
# statistical measures on residuals
res_min = residuals.min()
res_max = residuals.max()
res_mean = residuals.mean()
res_var = residuals.var()
res_std = residuals.std()
print("===============================================================")
print("Minimum of the residuals: " + str(res_min))
print("Maximum of the residuals: " + str(res_max))
print("Mean of the residuals: " + str(res_mean))
print("Variance of the residuals: " + str(res_var))
print("Coefficient of variation: " +
str(model_result.get_residuals_coeff_of_variation()))
# plotting residuals versus time-ordered data
plt.figure("Analysis of the residuals")
plt.subplot(211)
val_time_points = np.array(val_data.get_time_steps())
plt.plot(val_time_points, residuals, 'r+')
plt.legend()
# add y=0 line for reference and axis labels
plt.axhline(0, color='grey')
plt.xlabel('Observation order')
plt.ylabel('Residual')
# plotting residuals as a histogram
plt.subplot(212)
(res_h, res_edges, res_p) = plt.hist(residuals)
plt.xlabel('Residual')
plt.ylabel('Frequency')
# plot associated pdf
dist = norm(loc = res_mean, scale = res_std)
x = np.linspace(res_min, res_max, 100)
plt.plot(x, dist.pdf(x), 'k-', lw=2)
# Pearson's chi-squared test
chi_test = model_result.check_residuals_randomness()
print("Pearson's chi-squared test - H0: residuals follow a N(0,1)")
print("P value = " + str(chi_test[0]))
if chi_test[1]:
print("Not possible to reject null hypothesis.")
else:
print("Reject null hypothesis: residuals do not have a random behavior.")
# Runs test
runs_test = model_result.check_residuals_correlation()
print("Wald-Wolfowitz test - H0: residuals are uncorrelated")
print("P value = " + str(runs_test[0]))
if runs_test[1]:
print("Not possible to reject null hypothesis.")
else:
print("Reject null hypothesis: residuals show some correlation.")
plt.show()
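# Illustrative sketch (not part of the original script and not the BioPreDyn
# API): a plain Wald-Wolfowitz runs test on the signs of the residuals, to show
# roughly what check_residuals_correlation() evaluates. The normal
# approximation assumes a reasonably large number of residuals.
def _runs_test_sketch(res):
    signs = np.sign(res)
    signs = signs[signs != 0]                      # drop exact zeros
    n_pos = int(np.sum(signs > 0))
    n_neg = int(np.sum(signs < 0))
    runs = 1 + int(np.sum(signs[1:] != signs[:-1]))
    mu = 2.0 * n_pos * n_neg / (n_pos + n_neg) + 1.0
    var = (2.0 * n_pos * n_neg * (2.0 * n_pos * n_neg - n_pos - n_neg) /
           ((n_pos + n_neg) ** 2 * (n_pos + n_neg - 1.0)))
    z = (runs - mu) / np.sqrt(var)
    return 2.0 * (1.0 - norm.cdf(abs(z)))          # two-sided p-value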
| bsd-3-clause |
DmitryYurov/BornAgain | dev-tools/analyze/baloc/history_plot.py | 3 | 1917 | import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from .file_types import FileTypes, filetype
selected_file_type = [FileTypes.CORE, FileTypes.FTEST, FileTypes.UTEST, FileTypes.GUI, FileTypes.PYAPI]
def read_history(filename):
xvals = [] # Time
ydata = {} # id of file type .vs. LOC .vs. time
for x in selected_file_type:
ydata[x] = []
print("Reading file {0}".format(filename))
with open(filename, 'r') as the_file:
for line in the_file:
parts = line.strip().split()
date = datetime.strptime(parts[0] + " " + parts[1], '%Y-%m-%d %H:%M:%S')
xvals.append(date)
for x in selected_file_type:
ydata[x].append(int(parts[2+x]))
yvals = []
descr = []
for key in ydata:
descr.append(FileTypes.descr[key])
yvals.append(ydata[key])
# printing summary of LOC
for x in range(0, len(yvals)):
print("{:18} : {:10}".format(descr[x], yvals[x][-1]))
return xvals, yvals, descr
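# Illustrative sketch (not part of the original module): read_history() expects
# one whitespace-separated record per line, a date and time followed by one
# integer count per file type. The sample line below uses hypothetical values.
def _demo_parse_line(line="2018-03-01 12:00:00 41000 5200 8300 27000 6100"):
    parts = line.strip().split()
    when = datetime.strptime(parts[0] + " " + parts[1], '%Y-%m-%d %H:%M:%S')
    counts = [int(p) for p in parts[2:]]
    return when, counts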
def history_plot(filename):
xvals, yvals, descr = read_history(filename)
# figure size
my_dpi = 96
plt.figure(figsize=(1600*1.2 / my_dpi, 900*1.2 / my_dpi), dpi=my_dpi)
plt.style.use('seaborn-bright')
# making stackplot
plt.stackplot(xvals, yvals)
pal = ["#3399ff", "#ffcc00", "#ff0000", "#0033ff", "#999999"]
plt.stackplot(xvals, yvals, labels=descr, colors=pal)
# styling axes and grid
plt.grid(color='gray', linestyle='dashed')
ax = plt.gca()
ax.set_axisbelow(True)
plt.ylim(0.0, 220e+03)
plt.tick_params(axis='both', which='major', labelsize=14)
# making inverse legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='upper left', prop={'size': 18})
# saving plot
plt.savefig('lines_of_code.png', dpi=my_dpi, bbox_inches='tight')
| gpl-3.0 |
lenovor/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
georgid/SourceFilterContoursMelody | smstools/workspace/harmonicModel_function.py | 1 | 10695 | # function to call the extractHarmSpec analysis/synthesis functions in software/models/harmonicModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
import logging
from Parameters import Parameters
import math
from scipy.signal import blackmanharris, triang
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../software/models/'))
import utilFunctions as UF
import sineModel as SM
import harmonicModel as HM
# import software.models.utilFunctions as UF
# import software.models.sineModel as SM
# import software.models.harmonicModel as HM
# inputFile = '../sounds/vignesh.wav'
# def extractHarmSpec(inputFile='../sounds/vignesh.wav', window='blackman', M=1201, N=2048, t=-90,
# minSineDur=0.1, nH=100, minf0=130, maxf0=300, f0et=7, harmDevSlope=0.01):
# increasing the threshold means discarding more peaks and selecting less
def extractHarmSpec( inputFile, f0FreqsRaw, fromTs=-1, toTs=-1, t=Parameters.harmonicTreshold, window='blackman', M=Parameters.wSize - 1, N=2048 ,
minSineDur=0.0, nH=Parameters.nHarmonics, harmDevSlope=0.02):
"""
    Analysis and synthesis using the harmonic model.
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics could have higher allowed deviation
"""
# if not monophonic, convert to monophonic
# read input sound
(fs, x) = UF.wavread(inputFile)
# hopsize
hopSizeMelodia = int( round( (float(f0FreqsRaw[1][0]) - float(f0FreqsRaw[0][0]) ) * fs ) )
### get indices in melodia
toTs = float(toTs)
if fromTs==-1 and toTs==-1:
logging.debug("fromTs and toTs not defined. extracting whole recording")
fromTs=0; toTs=f0FreqsRaw[-1][0]
finalPitchSeriesTs = f0FreqsRaw[-1][0]
if finalPitchSeriesTs < fromTs or finalPitchSeriesTs < toTs:
sys.exit('pitch series have final time= {} and requested fromTs= {} and toTs={}'.format(finalPitchSeriesTs, fromTs, toTs) )
idx = 0
while fromTs > float(f0FreqsRaw[idx][0]):
idx += 1
firstTs = float(f0FreqsRaw[idx][0])
pinFirst = round (firstTs * fs)
f0Series = []
while idx < len(f0FreqsRaw) and float(f0FreqsRaw[idx][0]) <= toTs:
f0Series.append(float(f0FreqsRaw[idx][1]))
idx += 1
lastTs = float(f0FreqsRaw[idx-1][0])
pinLast = round (lastTs * fs)
# discard ts-s
# for foFreqRaw in range() f0FreqsRaw:
# f0Series.append(float(foFreqRaw[1]))
# size of fft used in synthesis
# hop size (has to be 1/4 of Ns)
# H = 128
# compute analysis window
w = get_window(window, M)
# detect harmonics of input sound
hfreq, hmag, hphase = HM.harmonicModelAnal_2(x, fs, w, N, hopSizeMelodia, pinFirst, t, nH, f0Series, harmDevSlope, minSineDur)
# w/o melodia and with resynthesis
# minf0=130
# maxf0=300
# f0et=7
# HM.harmonicModel(x, fs, w, N, t, nH, minf0, maxf0, f0et)
# return hfreq, hmag, hphase, fs, hopSizeMelodia, x[pinFirst:pinLast]
return hfreq, hmag, hphase, fs, hopSizeMelodia, x, w, N
def resynthesize(hfreq, hmag, hphase, fs, hopSizeMelodia, URIOutputFile):
''' synthesize the harmonics
'''
# Ns = 512
Ns = 4 * hopSizeMelodia
y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, hopSizeMelodia, fs)
# output sound file (monophonic with sampling rate of 44100)
# URIOutputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModel.wav'
# write the sound resulting from harmonic analysis
UF.wavwrite(y, fs, URIOutputFile)
print 'written file ' + URIOutputFile
return y
def hprModel_2(x, fs, w, N, t, nH, hfreq, hmag, hphase, outVocalURI, outbackGrURI):
"""
Analysis/synthesis of a sound using the harmonic plus residual model
x: input sound, fs: sampling rate, w: analysis window,
N: FFT size (minimum 512), t: threshold in negative dB,
nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz,
maxf0: maximim f0 frequency in Hz,
f0et: error threshold in the f0 detection (ex: 5),
maxhd: max. relative deviation in harmonic detection (ex: .2)
returns y: output sound, yh: harmonic component, xr: residual component
"""
hN = N/2 # size of positive spectrum
hM1 = int(math.floor((w.size+1)/2)) # half analysis window size by rounding
hM2 = int(math.floor(w.size/2)) # half analysis window size by floor
Ns = 512 # FFT size for synthesis (even)
H = Ns/4 # Hop size used for analysis and synthesis
hNs = Ns/2
pin = max(hNs, hM1) # initialize sound pointer in middle of analysis window
pend = x.size - max(hNs, hM1) # last sample to start a frame
fftbuffer = np.zeros(N) # initialize buffer for FFT
yhw = np.zeros(Ns) # initialize output sound frame
xrw = np.zeros(Ns) # initialize output sound frame
yh = np.zeros(x.size) # initialize output array
xr = np.zeros(x.size) # initialize output array
w = w / sum(w) # normalize analysis window
sw = np.zeros(Ns)
ow = triang(2*H) # overlapping window
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns) # synthesis window
bh = bh / sum(bh) # normalize synthesis window
wr = bh # window for residual
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
hfreqp = []
f0t = 0
i = 0
while pin<pend and i < len(hfreq):
#-----analysis-----
hfreqp = hfreq
ri = pin-hNs-1 # input sound pointer for residual analysis
xw2 = x[ri:ri+Ns]*wr # window the input sound
fftbuffer = np.zeros(Ns) # reset buffer
fftbuffer[:hNs] = xw2[hNs:] # zero-phase window in fftbuffer
fftbuffer[hNs:] = xw2[:hNs]
X2 = fft(fftbuffer) # compute FFT of input signal for residual analysis
#-----synthesis-----
Yh = UF.genSpecSines(hfreq[i,:], hmag[i,:], hphase[i,:], Ns, fs) # generate sines
# soft masking
Yh, Xr = softMask(X2, Yh, i)
fftbuffer = np.zeros(Ns)
fftbuffer = np.real(ifft(Yh)) # inverse FFT of harmonic spectrum
yhw[:hNs-1] = fftbuffer[hNs+1:] # undo zero-phase window
yhw[hNs-1:] = fftbuffer[:hNs+1]
fftbuffer = np.real(ifft(Xr)) # inverse FFT of residual spectrum
xrw[:hNs-1] = fftbuffer[hNs+1:] # undo zero-phase window
xrw[hNs-1:] = fftbuffer[:hNs+1]
yh[ri:ri+Ns] += sw*yhw # overlap-add for sines
xr[ri:ri+Ns] += sw*xrw # overlap-add for residual
pin += H
i += 1 # advance sound pointer
# sum of harmonic and residual components
UF.wavwrite(yh, fs, outVocalURI)
print 'written file ' + outVocalURI
UF.wavwrite(xr, fs, outbackGrURI)
print 'written file ' + outbackGrURI
return yh, xr
def softMask(X, V,i):
'''
X - original spectrum
V - vocal estim
'''
div = np.divide(abs(V), abs(X))
indices_clipping = np.where(div>1)[0]
if len(indices_clipping) > 1:
print 'it happens {} times at time {}'.format(len(indices_clipping),i)
maskVocal = np.minimum(div,1)
V_est = np.multiply(X, maskVocal )
K_est = np.multiply(X, 1-maskVocal)
return V_est, K_est
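# Illustrative sketch (not part of the original module): softMask() on toy
# spectra. Where the harmonic estimate V exceeds the mixture X the mask is
# clipped at 1, so the vocal estimate never exceeds the observed magnitude and
# the residual receives the remainder.
def _demo_soft_mask():
    X = np.array([4.0 + 0j, 2.0 + 0j, 1.0 + 0j])   # observed mixture spectrum
    V = np.array([2.0 + 0j, 3.0 + 0j, 0.5 + 0j])   # harmonic (vocal) estimate
    mask = np.minimum(np.abs(V) / np.abs(X), 1)    # [0.5, 1.0, 0.5]
    V_est = X * mask                               # [2.0, 2.0, 0.5]
    K_est = X * (1 - mask)                         # [2.0, 0.0, 0.5]
    return V_est, K_est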
# ##########################
# ## plotting of harmonic spectrum
#
def visualizeHarmSp(x, y, hopSizeMelodia ):
# create figure to show plots
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 10000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the harmonic frequencies
plt.subplot(3,1,2)
if (hfreq.shape[1] > 0):
numFrames = hfreq.shape[0]
frmTime = hopSizeMelodia * np.arange(numFrames)/float(fs)
hfreq[hfreq<=0] = np.nan
plt.plot(frmTime, hfreq)
plt.axis([0, x.size/float(fs), 0, maxplotfreq])
plt.title('frequencies of harmonic tracks')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# inputFile = 'example_data/dan-erhuang_01_1.wav'
# melodiaInput = 'example_data/dan-erhuang_01.txt'
inputFile = '/Users/joro/Documents/Phd/UPF/arias/laosheng-erhuang_04.wav'
melodiaInput = '/Users/joro/Documents/Phd/UPF/arias/laosheng-erhuang_04.melodia'
fromTs = 49.85
toTs = 55.00
fromTs = 0
toTs = 858
inputFile = '../sounds/vignesh.wav'
melodiaInput = '../sounds/vignesh.melodia'
fromTs = 0
toTs = 2
# exatract spectrum
    hfreq, hmag, hphase, fs, hopSizeMelodia, inputAudioFromTsToTs, w, N = extractHarmSpec(inputFile, melodiaInput, fromTs, toTs)
np.savetxt('hfreq_2', hfreq)
np.savetxt('hmag_2', hmag)
np.savetxt('hphase_2', hphase)
hfreq = np.loadtxt('hfreq_2')
hmag = np.loadtxt('hmag_2')
hphase = np.loadtxt('hphase_2')
# resynthesize
URIOutputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_harmonicModel.wav'
y = resynthesize(hfreq, hmag, hphase, fs, hopSizeMelodia, URIOutputFile)
visualizeHarmSp(inputAudioFromTsToTs, y, hopSizeMelodia )
| gpl-3.0 |
derekjchow/models | research/lfads/synth_data/generate_chaotic_rnn_data.py | 11 | 8412 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
import tensorflow as tf # used for flags here
from utils import write_datasets
from synthetic_data_utils import add_alignment_projections, generate_data
from synthetic_data_utils import generate_rnn, get_train_n_valid_inds
from synthetic_data_utils import nparray_and_transpose
from synthetic_data_utils import spikify_data, gaussify_data, split_list_by_inds
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
matplotlib.rcParams['image.interpolation'] = 'nearest'
DATA_DIR = "rnn_synth_data_v1.0"
flags = tf.app.flags
flags.DEFINE_string("save_dir", "/tmp/" + DATA_DIR + "/",
"Directory for saving data.")
flags.DEFINE_string("datafile_name", "thits_data",
"Name of data file for input case.")
flags.DEFINE_string("noise_type", "poisson", "Noise type for data.")
flags.DEFINE_integer("synth_data_seed", 5, "Random seed for RNN generation.")
flags.DEFINE_float("T", 1.0, "Time in seconds to generate.")
flags.DEFINE_integer("C", 100, "Number of conditions")
flags.DEFINE_integer("N", 50, "Number of units for the RNN")
flags.DEFINE_integer("S", 50, "Number of sampled units from RNN")
flags.DEFINE_integer("npcs", 10, "Number of PCS for multi-session case.")
flags.DEFINE_float("train_percentage", 4.0/5.0,
"Percentage of train vs validation trials")
flags.DEFINE_integer("nreplications", 40,
"Number of noise replications of the same underlying rates.")
flags.DEFINE_float("g", 1.5, "Complexity of dynamics")
flags.DEFINE_float("x0_std", 1.0,
"Volume from which to pull initial conditions (affects diversity of dynamics.")
flags.DEFINE_float("tau", 0.025, "Time constant of RNN")
flags.DEFINE_float("dt", 0.010, "Time bin")
flags.DEFINE_float("input_magnitude", 20.0,
"For the input case, what is the value of the input?")
flags.DEFINE_float("max_firing_rate", 30.0, "Map 1.0 of RNN to a spikes per second")
FLAGS = flags.FLAGS
# Note that with N small, (as it is 25 above), the finite size effects
# will have pretty dramatic effects on the dynamics of the random RNN.
# If you want more complex dynamics, you'll have to run the script a
# lot, or increase N (or g).
# Getting hard vs. easy data can be a little stochastic, so we set the seed.
# Pull out some commonly used parameters.
# These are user parameters (configuration)
rng = np.random.RandomState(seed=FLAGS.synth_data_seed)
T = FLAGS.T
C = FLAGS.C
N = FLAGS.N
S = FLAGS.S
input_magnitude = FLAGS.input_magnitude
nreplications = FLAGS.nreplications
E = nreplications * C # total number of trials
# S is the number of measurements in each datasets, w/ each
# dataset having a different set of observations.
ndatasets = N/S # ok if rounded down
train_percentage = FLAGS.train_percentage
ntime_steps = int(T / FLAGS.dt)
# End of user parameters
rnn = generate_rnn(rng, N, FLAGS.g, FLAGS.tau, FLAGS.dt, FLAGS.max_firing_rate)
# Check to make sure the RNN is the one we used in the paper.
if N == 50:
assert abs(rnn['W'][0,0] - 0.06239899) < 1e-8, 'Error in random seed?'
rem_check = nreplications * train_percentage
assert abs(rem_check - int(rem_check)) < 1e-8, \
'Train percentage * nreplications should be integral number.'
# Initial condition generation, and condition label generation. This
# happens outside of the dataset loop, so that all datasets have the
# same conditions, which is similar to a neurophys setup.
condition_number = 0
x0s = []
condition_labels = []
for c in range(C):
x0 = FLAGS.x0_std * rng.randn(N, 1)
x0s.append(np.tile(x0, nreplications)) # replicate x0 nreplications times
# replicate the condition label nreplications times
for ns in range(nreplications):
condition_labels.append(condition_number)
condition_number += 1
x0s = np.concatenate(x0s, axis=1)
# Containers for storing data across data.
datasets = {}
for n in range(ndatasets):
print(n+1, " of ", ndatasets)
# First generate all firing rates. in the next loop, generate all
# replications this allows the random state for rate generation to be
# independent of n_replications.
dataset_name = 'dataset_N' + str(N) + '_S' + str(S)
if S < N:
dataset_name += '_n' + str(n+1)
# Sample neuron subsets. The assumption is the PC axes of the RNN
# are not unit aligned, so sampling units is adequate to sample all
# the high-variance PCs.
P_sxn = np.eye(S,N)
for m in range(n):
P_sxn = np.roll(P_sxn, S, axis=1)
if input_magnitude > 0.0:
# time of "hits" randomly chosen between [1/4 and 3/4] of total time
input_times = rng.choice(int(ntime_steps/2), size=[E]) + int(ntime_steps/4)
else:
input_times = None
rates, x0s, inputs = \
generate_data(rnn, T=T, E=E, x0s=x0s, P_sxn=P_sxn,
input_magnitude=input_magnitude,
input_times=input_times)
if FLAGS.noise_type == "poisson":
noisy_data = spikify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
elif FLAGS.noise_type == "gaussian":
noisy_data = gaussify_data(rates, rng, rnn['dt'], rnn['max_firing_rate'])
else:
raise ValueError("Only noise types supported are poisson or gaussian")
# split into train and validation sets
train_inds, valid_inds = get_train_n_valid_inds(E, train_percentage,
nreplications)
# Split the data, inputs, labels and times into train vs. validation.
rates_train, rates_valid = \
split_list_by_inds(rates, train_inds, valid_inds)
noisy_data_train, noisy_data_valid = \
split_list_by_inds(noisy_data, train_inds, valid_inds)
input_train, inputs_valid = \
split_list_by_inds(inputs, train_inds, valid_inds)
condition_labels_train, condition_labels_valid = \
split_list_by_inds(condition_labels, train_inds, valid_inds)
input_times_train, input_times_valid = \
split_list_by_inds(input_times, train_inds, valid_inds)
# Turn rates, noisy_data, and input into numpy arrays.
rates_train = nparray_and_transpose(rates_train)
rates_valid = nparray_and_transpose(rates_valid)
noisy_data_train = nparray_and_transpose(noisy_data_train)
noisy_data_valid = nparray_and_transpose(noisy_data_valid)
input_train = nparray_and_transpose(input_train)
inputs_valid = nparray_and_transpose(inputs_valid)
# Note that we put these 'truth' rates and input into this
# structure, the only data that is used in LFADS are the noisy
# data e.g. spike trains. The rest is either for printing or posterity.
data = {'train_truth': rates_train,
'valid_truth': rates_valid,
'input_train_truth' : input_train,
'input_valid_truth' : inputs_valid,
'train_data' : noisy_data_train,
'valid_data' : noisy_data_valid,
'train_percentage' : train_percentage,
'nreplications' : nreplications,
'dt' : rnn['dt'],
'input_magnitude' : input_magnitude,
'input_times_train' : input_times_train,
'input_times_valid' : input_times_valid,
'P_sxn' : P_sxn,
'condition_labels_train' : condition_labels_train,
'condition_labels_valid' : condition_labels_valid,
'conversion_factor': 1.0 / rnn['conversion_factor']}
datasets[dataset_name] = data
if S < N:
# Note that this isn't necessary for this synthetic example, but
# it's useful to see how the input factor matrices were initialized
# for actual neurophysiology data.
datasets = add_alignment_projections(datasets, npcs=FLAGS.npcs)
# Write out the datasets.
write_datasets(FLAGS.save_dir, FLAGS.datafile_name, datasets)
| apache-2.0 |
willhess/aima-python | submissions/Blue/myNN.py | 10 | 3071 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Blue import music
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
musicATRB = DataFrame()
musicATRB.data = []
targetData = []
'''
Extract data from the CORGIS Music Library.
Most 'hit' songs average 48-52 bars and no more than ~3 minutes (180 seconds)...
'''
allSongs = music.get_songs()
for song in allSongs:
try:
length = float(song['song']["duration"])
targetData.append(length)
genre = song['artist']['terms'] #String
title = song['song']['title'] #String
# release = float(song['song']['Release'])
musicATRB.data.append([genre, title])
except:
traceback.print_exc()
musicATRB.feature_names = [
    'Genre',
    'Title',
]
musicATRB.target = []
def musicTarget(duration):
    if duration <= 210:  # if the song is less than 3.5 minutes (210 seconds) long
        return 1
    return 0
for i in targetData:
tt = musicTarget(i)
musicATRB.target.append(tt)
musicATRB.target_names = [
'Not a hit song',
'Could be a hit song',
]
Examples = {
'Music': musicATRB,
}
'''
Make a customn classifier,
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (100,),
activation = 'relu',
solver='sgd', # 'adam',
alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
musicScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
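# Illustrative sketch (not part of the original script): the min/max scaling
# above on a toy numeric grid. Each column is mapped onto [0, 1]; note that
# calling this overwrites the global min/max, so it is for illustration only.
def _demo_min_max_scaling():
    toy = [[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]]
    setupScales(toy)
    return scaleGrid(toy)  # -> [[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]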
setupScales(musicATRB.data)
musicScaled.data = scaleGrid(musicATRB.data)
musicScaled.feature_names = musicATRB.feature_names
musicScaled.target = musicATRB.target
musicScaled.target_names = musicATRB.target_names
Examples = {
'musicDefault': {
'frame': musicATRB,
},
'MusicSGD': {
'frame': musicATRB,
'mlpc': mlpc
},
'MusisScaled': {
'frame': musicScaled,
},
} | mit |
drubinstein/focus | kfkd.py | 1 | 3599 | # file kfkd.py
import os
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
import matplotlib.pyplot as pyplot
from six.moves import cPickle
FTRAIN = '~/data/kaggle-facial-keypoint-detection/training.csv'
FTEST = '~/data/kaggle-facial-keypoint-detection/test.csv'
def plot_sample(x, y, axis):
img = x.reshape(96, 96)
axis.imshow(img, cmap='gray')
axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)
def load(test=False, cols=None):
"""Loads data from FTEST if *test* is True, otherwise from FTRAIN.
Pass a list of *cols* if you're only interested in a subset of the
target columns.
"""
fname = FTEST if test else FTRAIN
df = read_csv(os.path.expanduser(fname)) # load pandas dataframe
# The Image column has pixel values separated by space; convert
# the values to numpy arrays:
df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))
if cols: # get a subset of columns
df = df[list(cols) + ['Image']]
print(df.count()) # prints the number of values for each column
df = df.dropna() # drop all rows that have missing values in them
X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]
X = X.astype(np.float32)
if not test: # only FTRAIN has any target columns
y = df[df.columns[:-1]].values
y = (y - 48) / 48 # scale target coordinates to [-1, 1]
X, y = shuffle(X, y, random_state=42) # shuffle train data
y = y.astype(np.float32)
else:
y = None
return X, y
X, y = load()
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
y.shape, y.min(), y.max()))
net1 = NeuralNet(
layers=[ # three layers: one hidden layer
('input', layers.InputLayer),
('hidden', layers.DenseLayer),
('output', layers.DenseLayer),
],
# layer parameters:
input_shape=(None, 9216), # 96x96 input pixels per batch
hidden_num_units=100, # number of units in hidden layer
output_nonlinearity=None, # output layer uses identity function
output_num_units=30, # 30 target values
# optimization method:
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=True, # flag to indicate we're dealing with regression problem
max_epochs=400, # we want to train this many epochs
verbose=1,
)
X, y = load()
net1.fit(X, y)
print('Saving the model to nn.mdl')
f = open('nn.mdl', 'wb')
cPickle.dump(net1, f, protocol = cPickle.HIGHEST_PROTOCOL)
f.close()
print('Plotting training and validation')
train_loss = np.array([i["train_loss"] for i in net1.train_history_])
valid_loss = np.array([i["valid_loss"] for i in net1.train_history_])
pyplot.plot(train_loss, linewidth=3, label="train")
pyplot.plot(valid_loss, linewidth=3, label="valid")
pyplot.grid()
pyplot.legend()
pyplot.xlabel("epoch")
pyplot.ylabel("loss")
pyplot.ylim(1e-3, 1e-2)
pyplot.yscale("log")
pyplot.show(block=False)
X, _ = load(test=True)
y_pred = net1.predict(X)
print(X.shape, y_pred.shape)
print('Plotting faces')
fig = pyplot.figure(figsize=(6, 6))
fig.subplots_adjust(
left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(16):
ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
plot_sample(X[16*i], y_pred[16*i], ax)
pyplot.show()
| mit |
Carmezim/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 9 | 67662 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
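    # (With identical features the model converges to the constant prediction
    # p = 1/4, the label mean, so the mean log loss is
    # -(1/4)*log(0.25) - (3/4)*log(0.75) ~= 0.347 + 0.216 = 0.562.)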
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
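    # (Here 7 is the weight of the single positive row, 3 is the combined
    # weight of the three negative rows, and 10 is the total evaluation
    # weight 7+1+1+1.)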
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
# Expected is 100, but because of the global step increment bug, this is 51.
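    # (Presumably both the linear and the DNN train ops increment the global
    # step, so each iteration advances it by two and fit() stops after
    # roughly half of the requested 100 steps.)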
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
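    # (The constant prediction converges to 0.25, so the single 1-label row
    # contributes an error of 0.75^2 and each 0-label row contributes 0.25^2.)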
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
songjs1993/DeepLearning | 3CNN/ensemble_svd.py | 1 | 8079 | # Author: Alan
import random
from PIL import Image
from numpy import *
import os
from sklearn.linear_model import LogisticRegression
from sklearn.calibration import calibration_curve
import numpy as np
from sklearn import svm
from sklearn import metrics
def get_embeddings(bash_path):
    # Read each photo from the files, run SVD on it, and use the singular values as features
embeddings = {}
# box = (0, 0, 100, 100)
cnt = 0
for parent, dirnames, filenames in os.walk(bash_path):
        # For each folder
# print(parent,filenames)
# if parent[11:] not in embeddings:
# if cnt>200:
# break
if cnt%10==0:
print("deal with fold number : %d/%d" %(cnt, 5749))
cnt += 1
if len(parent) > 11:
embeddings[parent[11:]] = []
for filename in filenames:
            # This is an image file
# parent = parent[11:]
            lena = Image.open(parent + "/" + filename)  # images are a uniform 250*250
# print(lena.size)
# lena = Image.open(parent + "/" + filename).crop(box)
lena = np.reshape(lena, [-1])
embedding = []
for i in range(3):
data = lena[i*250*250: (i+1)*250*250]
data = np.reshape(data, [250,250])
u, sigma, vt = linalg.svd(data)
                embedding.extend(sigma[0:100])  # keep the first 100 singular values
            # Singular-value feature vector for one sample
# print(embedding)
embeddings[parent[11:]].append(embedding)
# save(embeddings)
print("get embeddings!")
return embeddings
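# Illustrative sketch (not part of the original pipeline): the block-wise SVD
# feature built by get_embeddings(), applied to a random flattened 250x250x3
# array. The top 100 singular values of each 250x250 block are kept, giving a
# 300-dimensional vector per image.
def _svd_feature_demo():
    flat = np.random.rand(250 * 250 * 3)  # stands in for one flattened image
    feature = []
    for i in range(3):
        block = np.reshape(flat[i * 250 * 250:(i + 1) * 250 * 250], [250, 250])
        _, sigma, _ = linalg.svd(block)  # only the singular values are kept
        feature.extend(sigma[:100])
    return feature  # length 300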
def saveX(file_path, trainX):
with open(file_path, "w") as f:
# f.write(str(len(embeddings)) + " 600\n")
for i in range(len(trainX)):
f.write(str(i))
for x in trainX[i]:
f.write(" " + str(x))
f.write("\n")
print("save: " + file_path)
def saveY(file_path, trainY):
with open(file_path, "w") as f:
# f.write(str(len(embeddings)) + " 600\n")
for i in range(len(trainY)):
f.write(str(trainY[i]) + "\n")
print("save: " + file_path)
def save_data(trainX, trainY, testX, testY):
saveX("./trainX", trainX)
saveY("./trainY", trainY)
saveX("./testX", testX)
saveY("./testY", testY)
print("save data finish!")
def readX(file_path):
trainX = []
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
words = line.split(" ")
words = words[1:]
x = []
for word in words:
                x.append(float(word))  # note: convert back to float
trainX.append(x)
return trainX
def readY(file_path):
trainY = []
with open(file_path, "r") as f:
lines = f.readlines()
for line in lines:
            trainY.append(float(line))  # note: convert back to float
return trainY
def read_data():
trainX = readX("./trainX")
trainY = readY("./trainY")
testX = readX("./testX")
testY = readY("./testY")
print("read data finish!")
return np.array(trainX), np.array(trainY), np.array(testX), np.array(testY)
def get_data(bash_path, train_file, embeddings):
trainX = []
trainY = []
with open(bash_path + train_file, "r") as f:
lines = f.readlines()
# train_num = int(lines[0][:-1])
lines = lines[1:]
# lines = lines[1:10]
for line in lines:
words = line.split("\t")
if len(words)<=3:
                # Positive sample (same person)
first = int(words[1])-1
second = int(words[2][:-1])-1
x = []
x.extend(embeddings[words[0]][first])
x.extend(embeddings[words[0]][second])
trainX.append(x)
trainY.append(1.0)
else:
                # Negative sample (different people)
first = int(words[1])-1
second = int(words[3][:-1])-1
x = []
x.extend(embeddings[words[0]][first])
x.extend(embeddings[words[2]][second])
trainX.append(x)
trainY.append(0.0)
return trainX, trainY
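# Note on the assumed input format (LFW-style pairsDev{Train,Test} files read
# by get_data above): after a count on the first line, each tab-separated line
# is either "name idx1 idx2" for a positive pair (same person) or
# "name1 idx1 name2 idx2" for a negative pair. Image indices start at 1, which
# is why the code subtracts 1 before indexing into embeddings[name].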
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
def logistic_regression_pre(bash_path, train_file, test_file):
"""
    Test using LR.
    :param train_file:
    :param test_file:
    :param rate_feature: 300 features in total; fraction of features to select
    :param rate_example: fraction of samples to select
:return:
"""
trainX, trainY, testX, testY = read_data()
# clf = svm.SVC(kernel='rbf', gamma=0.7, C=1.0).fit(trainX, trainY)
# y_predicted = clf.predict(testX)
W = np.ones(trainX.shape[0]) / trainX.shape[0]
clf = DecisionTreeClassifier(max_depth=3).fit(trainX, trainY, sample_weight=W)
# clf = LogisticRegression().fit(trainX, trainY, sample_weight=W)
y_predicted = clf.predict(testX)
print("fit finish!")
print("accuracy: ",metrics.accuracy_score(testY, y_predicted))
# fraction_of_positives, mean_predicted_value = calibration_curve(testY, prob_pos)
# print("mean_predicted_value:", mean_predicted_value, fraction_of_positives)
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
def ensemble(bash_path, train_file, test_file):
train_x, train_y, test_x, test_y = read_data()
W = np.ones(train_x.shape[0]) / train_x.shape[0]
M = 100
K = 2
D = 2
print("D: ",D)
alpha = np.empty(M)
g = []
for m in range(M):
g.append(DecisionTreeClassifier(max_depth=D))
g[m].fit(train_x, train_y, sample_weight=W)
pre_y = g[m].predict(train_x)
I = (pre_y != train_y)
e = I.dot(W)
alpha[m] = np.sqrt((1 - e) * (K - 1) / e)
        W = W * I * alpha[m] + W * ~I / alpha[m]  # up-weight errors, down-weight correct samples
W = normalize(W.reshape(1, -1), norm='l1').reshape(train_x.shape[0])
alpha = np.log(alpha)
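    # Sketch of the update above (assuming a SAMME-style scheme with K
    # classes): misclassified samples are scaled by sqrt((1-e)(K-1)/e) and
    # correct ones by its inverse, so after taking the log the estimator
    # weight is alpha_m = 0.5*(ln((1-e)/e) + ln(K-1)), a halved variant of the
    # usual SAMME coefficient ln((1-e)/e) + ln(K-1).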
pre_y = np.zeros((test_x.shape[0], K))
accu = np.empty(M)
for m in range(M):
print("m:", m)
pre_y += alpha[m] * g[m].predict_proba(test_x)
accu[m] = np.sum(np.argmax(pre_y, axis=1) == test_y) / test_x.shape[0]
print("无提升准确率:\t", accu[0])
print("最大提升准确率:\t", np.max(accu))
print("最大提升迭代轮数:\t", np.argmax(accu))
plt.plot([accu[0]] * M, label="base learner")
plt.plot(accu, label="my")
plt.xlabel("iteration")
plt.ylabel("accuracy rate (%)")
plt.title("Adaboost (tree max_depth:%d)" % D)
plt.show()
print("sklearn...")
    # Test using sklearn's built-in AdaBoostClassifier for comparison
accu2 = []
for m in range(1, M + 1):
print("m:", m)
clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=D), n_estimators=m)
clf.fit(train_x, train_y)
pre_y = clf.predict(test_x)
accu2.append(np.sum(pre_y == test_y) / test_x.shape[0])
print("Sklearn")
print("无提升准确率:\t", accu2[0])
print("最大提升准确率:\t", np.max(accu2))
print("最大提升迭代轮数:\t", np.argmax(accu2))
plt.plot(accu2, label="sklearn")
plt.legend(loc="lower right")
plt.xlabel("iteration")
plt.ylabel("accuracy rate (%)")
plt.title("Adaboost (tree max_depth:%d)" % D)
plt.show()
if __name__ =="__main__":
    # Features: SVD
    # Model: LR
bash_path = "./lfw_data/"
train_file = "_pairsDevTrain.txt"
test_file = "_pairsDevTest.txt"
# embeddings = get_embeddings(bash_path)
# trainX, trainY = get_data(bash_path, train_file, embeddings)
# testX, testY = get_data(bash_path, test_file, embeddings)
# print("get train and test data!")
#
    # # Save the training and test data --- read it back directly later
# save_data(trainX, trainY, testX, testY)
# print("save train and test data!")
# logistic_regression_pre(bash_path, train_file, test_file)
ensemble(bash_path, train_file, test_file)
| apache-2.0 |
nicholasyager/turbulent_swarm_modeling | swarm.py | 1 | 12690 | #!/usr/bin/python2
import os
import time
import random
import math
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axis3d
import matplotlib.animation as animation
from subprocess import call
def weibull(x,k,l):
"""Determine the probability at x with k and l."""
return (k/l) * math.pow(x/l,k-1) * math.exp( -math.pow( x/l, k) )
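# For reference, weibull(1, 1, 1) reduces to (1/1)*1*exp(-1), roughly 0.368.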
def gaussian(x, mu, sigma):
"""Determine the probability at x with mu and sigma."""
return (1/(sigma * math.pow(2*3.14159,2)) * \
math.exp( -( math.pow(x - mu,2) / ( 2 * math.pow(sigma,2) ))))
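# Sanity check (assuming the standard normal density): gaussian(0, 0, 1)
# should evaluate to roughly 1/sqrt(2*pi) ~= 0.3989.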
def generateCurrent(position, currentType, w):
"""Generate a current of a particular geometry with velocity w."""
    # In these initial stages, it may be best to use a form of
# circular current. Vector velocity is based on angular
# velocity w.
distance = math.sqrt( math.pow(5 - position[0],2) + \
math.pow(5 - position[2],2))
v = w * gaussian(distance, 5, 3) * 5
angle = math.atan( (position[2] - 5 ) / (position[0] - 5) )
# Quadrant II
if position[0] < 5 and position[2] > 5:
angle = 3.14159 + angle
# Quadrant III
elif position[0] < 5 and position[2] < 5:
angle = 3.14159 + angle
# Quadrant IV
elif position[0] > 5 and position[2] < 5:
angle = (2 * 3.14159) + angle
angle -= (3.14159/2)
vector = [0,0,0]
vector[0] += v * math.cos(angle)
vector[2] += v * math.sin(angle)
return vector
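# Rough usage sketch (illustrative only; the sample points are arbitrary and
# chosen off the x=5 axis to avoid the division by zero in the angle
# calculation): the vectors returned by generateCurrent() are tangential to
# circles around the column at x=5, z=5.
def _demo_current_field(w=1.0):
    """Print the current vector at a few sample positions."""
    for position in ([7, 0, 5], [3, 0, 6], [6, 0, 3]):
        vector = generateCurrent(position, "circle", w)
        print("%s -> %s" % (position, [round(component, 3) for component in vector]))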
def configure_figure(ax, tick, SWARM_NUMBER_FISH,width):
"""Configure the matplotlib plot to the proper constraints."""
# Write the details of the current tick onto the image.
ax.text2D(0.05, 0.9, TIME + "\nTick: " + str(tick) + "\nPop: "+str(SWARM_NUMBER_FISH) ,
transform=ax.transAxes)
# Assign labels to the graph
ax.set_xlabel('X')
ax.set_ylabel('Z')
ax.set_zlabel('Y')
# Padding
pad = 2
# Set the limits for the graph
ax.set_xlim3d(0-pad, width+pad)
ax.set_ylim3d(0-pad, width+pad)
ax.set_zlim3d(0-pad, width+pad)
return ax
def pad(n, symbol, string):
"""Pad the string with a symbol n times."""
while len(string) < n:
string = symbol + string
return string
class Fish:
def __init__(self, width, height, depth, density):
"""Initialize a Fish object, giving it a random position at start."""
# Set a
self.ID = 0
self.x = random.gauss(width/2, density)
self.y = random.gauss(height/2, density)
self.z = random.gauss(depth/2, density)
self.vector = [0,0,0]
return
def setID(self, ID):
"""Save the ID number for this fish for later calculations."""
self.ID = ID
return
def setVector(self, vector):
"""Write the fish's vector to a buffer to be used on the next timestep.
"""
# Store the vector for future use without commiting it within this
# current step. This would create a bias in future heading detection.
self.future_vector = vector
def move(self, timestep):
"""Read the vectors and move accordingly."""
self.vector = self.future_vector
self.x += self.vector[0] * timestep
self.y += self.vector[1] * timestep
self.z += self.vector[2] * timestep
def findClosest(self, swarm, n):
"""Return the closest n fish to a given fish within a swarm."""
position = self.getLocation()
partners = []
for otherFish in swarm:
if otherFish.ID != self.ID:
otherLocation = otherFish.getLocation()
distance = self.getDistance( otherLocation )
partners.append( (distance,otherFish) )
partners.sort()
return partners[:n]
# Reporter functons
def getLocation(self):
"""Returns the x, y, and z coordinates of the fish."""
return [self.x, self.y, self.z]
def getVector(self):
"""Returns the x, y, and z coordinates of the fish."""
return self.vector
def getDistance(self, location):
"""Returns the cartesian distance between this fish and a different
location."""
return math.sqrt( math.pow(location[0] - self.x,2) + \
math.pow(location[1] - self.y,2) + \
math.pow(location[2] - self.z,2))
class Swarm:
def __init__(self, n, d, r, width, height, depth, dampen):
"""In a world of a given width, height, and depth, create a swarm of n
size, with a start density d which equilibrates through swarm
mentality to a desired radius r."""
self.r = r
print("Creating a swarm of {0} with d={1}".format(n,d))
# Iterate through the loop and create fish.
self.swarm = []
self.dampen = dampen
for x in range(0, n):
newFish = Fish(width, height, depth, d)
newFish.setID(x)
self.swarm.append(newFish)
def evaluate(self, w, timestep):
"""Evaluate the swarm as individuals, choosing each randomly with
exclusion."""
# Evaluation.
#
# For evaluation, each fish is examined at the current time and a new
# heading and velocity is calculated. This heading and velocity is
# determined in this order:
#
# 1. Find the 5 closest fish (the local group) and move to within an
# "equilibrium" distance, as defined by two opposing Gaussian
# distributions. This serves to keep a local and macro swarm
# together as well as moving in similar directions in turbulent
# fluids.
# 2. Turbulent forces will move the fish in a particular direction
# based on their respective positions. This is currently the major
# factor in swarm movement within the simulation.
# 3. The average heading of the swarm should be considered so that
# members of the swarm move in similar directions as their
# neighbors. This is a tricky problem that I have yet to implement
# into the simulation until I have found a way to bypass bias.
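# Note on bias: setVector() only buffers the new heading in
# future_vector, and move() applies it after every fish has been
# evaluated, so each fish reads its neighbours' headings from the
# previous timestep rather than from a half-updated mixture.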
for locus in range(0,len(self.swarm)):
vector = [0,0,0] # The new vector
locusFish = self.swarm[locus] # The fish to be examined
position = locusFish.getLocation()
# Local group swarming
closestFish = locusFish.findClosest(self.swarm,5)
localHeading = [0,0,0]
for distance, fish in closestFish:
location = fish.getLocation()
heading = fish.getVector()
localHeading[0] += heading[0]
localHeading[1] += heading[1]
localHeading[2] += heading[2]
local_magnatude = gaussian(distance, 3, 2) * 10 # Attractor
local_magnatude -= gaussian(distance, 0, 1) * 3 # Repulsor
localHeading[0] = localHeading[0] / 5
localHeading[1] = localHeading[1] / 5
localHeading[2] = localHeading[2] / 5
# Turbulent swarming
currentVector = generateCurrent( position, "circle", w)
# Sum the vector components
for coord in range(0, 3):
local_component = local_magnatude * (
(location[coord] - position[coord]) / \
distance )
vector[coord] += ((currentVector[coord] + local_component) + \
localHeading[coord])/2
# Check for boundaries
if (position[coord] < 0 and vector[coord] < 0):
vector[coord] += position[coord]/2 * vector[coord]
if (position[coord] >= 10 and vector[coord] > 0):
vector[coord] -= (position[coord] - 10) /2 * vector[coord]
locusFish.setVector(vector)
for fish in self.swarm:
fish.move(timestep)
return
def visualize(self, ax):
"""Plot the current locations of each fish."""
# Clear the previous graph
ax.clear()
# Create empty lists for the current tick
xvalues, yvalues, zvalues = [], [], []
# Fill the lists with fish positions.
for fish in self.swarm:
xvalues.append(fish.x)
yvalues.append(fish.y)
zvalues.append(fish.z)
# Plot the fish on a new graph
ax.scatter(xvalues, zvalues, yvalues)
# I must return the updated axis object.
return ax
def write(self, t):
"""Write the current state of the swarm at time t to a csv file of
location."""
#DEV: A relic from an older time. This should be updated soon.
if t < 100:
spacer = 0
else:
spacer = ""
if t < 10:
spacer2 = 0
else:
spacer2 = ""
# Create a .csv file.
global TIME
swarmFile = file(TIME + '/swarm_{0}{1}{2}.csv'.format(spacer,spacer2,t), 'w')
# Write the location and vector of each fish to the .csv file.
swarmFile.write('"x","y","z","i","j","k"\n')
for fish in self.swarm:
location = fish.getLocation()
vector = fish.getVector()
swarmFile.write('{0},{1},{2},{3},{4},{5}\n'.format(location[0],
location[1],
location[2],
vector[0],
vector[1],
vector[2]))
swarmFile.close()
return
random.seed(100) # Set an arbitrary seed!
# Set the world variables
WORLD_WIDTH = 10 # Arbitrarily set width
WORLD_HEIGHT = 10
WORLD_DEPTH = 10
SIMULATION_TIMESTEP = 0.5
SIMULATION_TIME = 100
SIMULATION_SPIN = 1
SWARM_NUMBER_FISH = 25
SWARM_DENSITY = 4
SWARM_RADIUS = 1
SWARM_DAMPEN = 1
global TIME
TIME = time.strftime("%Y-%m-%dT%H%M%S", time.gmtime())
os.makedirs(TIME)
#################### Handle Plotting ###########################
fig = plt.figure(figsize=(12,9))
ax = fig.add_subplot(111, projection='3d')
for axis in ax.w_xaxis, ax.w_yaxis, ax.w_zaxis:
for elt in axis.get_ticklines() + axis.get_ticklabels():
#elt.set_visible(False)
#axis.pane.set_visible(False)
axis.gridlines.set_visible(False)
#axis.line.set_visible(False)
#plt.show(block=False)
################### Simulate ###################################
# Create a swarm
swarm = Swarm(SWARM_NUMBER_FISH,
SWARM_DENSITY,
SWARM_RADIUS,
WORLD_WIDTH,
WORLD_HEIGHT,
WORLD_DEPTH,
SWARM_DAMPEN)
# Simulation Loop
#
# For each tick in the simulation, evaluate the swarm at its current location,
# and then render and save a 3D plot of the state.
for tick in range(0,SIMULATION_TIME):
#swarm.write(tick) # Write the positions of the fish at the given timestep.
swarm.evaluate(SIMULATION_SPIN,SIMULATION_TIMESTEP)
## Visualize the swarm behavior, and save the images ##
ax = swarm.visualize(ax)
ax = configure_figure(ax, tick, SWARM_NUMBER_FISH, WORLD_WIDTH)
## Properly name the file in a way to animate in the correct order.
filenumber = pad( len(str(SIMULATION_TIME)) , "0", str(tick))
plt.savefig(TIME+'/swarm_{0}.png'.format(filenumber),
bbox_inches='tight')
plt.draw()
print("Tick: " + str(tick))
# Image Modification:
#
# This step takes the produced images, and from them creates a .gif animation
# of the simulation. This modification takes three steps:
#
# 1. Trim the images to remove all excess white and transparent space.
# 2. Resize the images to a more manageable format. More careful production of
# initial images will reduce the need for resizing in the future.
# 3. Take all of the processed images and merge them into a .gif file.
#print("Triming images..."),
#call(["mogrify","-trim","+repage", TIME+"/*"])
#print(" Complete")
print("Resizing Images..."),
call(["mogrify","-resize","600x478!",TIME+"/*"])
print(" Complete")
print("Generating animation..."),
call(["ffmpeg","-r", "15", "-pattern_type", "glob","-i", TIME+"/*.png",
"-c:v", "libx264", "-pix_fmt", "yuv420p",
TIME+"/swarm.mp4"])
print(" Complete")
print("Simulation saved to "+TIME+"/")
print("Simulation complete.")
| gpl-2.0 |
gjsun/MPack | MPack_Core/Ks_Mstar_Estimate.py | 1 | 6218 | import numpy as np
import os as os
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline, InterpolatedUnivariateSpline, interp1d
# Get C[II] redshift
from COPS_example import z_CII, z_obs_CO
# Get the path of the master catalog file
_HOME = os.environ.get('HOME')
rand = np.random.RandomState(42)
np.set_printoptions(precision=4,suppress=True)
############### Mass Estimator (from R. Quadri) ###############
# Data points for interpolation
zmeans = [0.200, 0.400, 0.600, 0.800, 1.050, 1.350, 1.650, 2.000, 2.400, 2.900, 3.500]
intercepts = [18.2842,18.9785,19.2706,19.1569,20.5633,21.5504,19.6128,19.8258,19.8795,23.1529,22.1678]
slopes = [-0.454737,-0.457170,-0.454706,-0.439577,-0.489793,-0.520825,-0.436967,-0.447071,-0.443592,-0.558047,-0.510875]
slopes_cols = [0.0661783,-0.0105074,0.00262891,0.140916,0.0321968,0.0601271,0.470524,0.570098,0.455855,0.0234542,0.0162301]
intercepts_b = [18.3347,18.9626,19.2789,19.6839,20.7085,21.8991,22.9160,24.1886,22.6673,23.1514,21.6482]
slopes_b = [-0.456550,-0.456620,-0.455029,-0.460626,-0.495505,-0.534706,-0.570496,-0.617651,-0.543646,-0.556633,-0.487324]
data_list = [zmeans, intercepts, slopes, slopes_cols, intercepts_b, slopes_b]
# Interpolate the data points, and force the extrapolation to asymptote the mean of data points.
def intercept_full(z, flag='color_magnitude'):
dist = np.maximum((z - 0.2)*(z - 3.5), 0)
# Interpolating data
if flag == 'color_magnitude':
sm = InterpolatedUnivariateSpline(zmeans, intercepts, k=3)
elif flag == 'magnitude_only':
sm = InterpolatedUnivariateSpline(zmeans, intercepts_b, k=3)
else:
raise ValueError('Invalid flag name!')
# Forcing extrapolation to asymptote
ans = sm(z) * np.exp(-dist) + np.mean(intercepts) * (1.-np.exp(-dist))
return ans
def slope_full(z, flag='color_magnitude'):
dist = np.maximum((z - 0.2)*(z - 3.5), 0)
if flag == 'color_magnitude':
sm = InterpolatedUnivariateSpline(zmeans, slopes, k=3)
elif flag == 'magnitude_only':
sm = InterpolatedUnivariateSpline(zmeans, slopes_b, k=3)
else:
raise ValueError('Invalid flag name!')
ans = sm(z) * np.exp(-dist) + np.mean(slopes) * (1.-np.exp(-dist))
return ans
def slope_col_full(z, flag='color_magnitude'):
dist = np.maximum((z - 0.2)*(z - 3.5), 0)
if flag == 'color_magnitude':
sm = InterpolatedUnivariateSpline(zmeans, slopes_cols, k=3)
ans = sm(z) * np.exp(-dist) + np.mean(slopes_cols) * (1.-np.exp(-dist))
return ans
elif flag == 'magnitude_only':
return 0
else:
raise ValueError('Invalid flag name!')
func_list = [intercept_full, slope_full, slope_col_full]
# Define the mass estimation function
def Mass(K, JK, z):
""" Return the mass estimate for the given K-band magnitude, J-K color and redshift """
if (JK < 0.) or (JK > 5.):
flag = 'magnitude_only'
else:
flag = 'color_magnitude'
model = slope_full(z, flag) * K + intercept_full(z, flag) + slope_col_full(z, flag) * JK
return model
Mass = np.vectorize(Mass)
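# Illustrative call with made-up inputs (K magnitude, J-K colour,
# redshift); the result is a log stellar-mass estimate on the same
# scale as the catalog's 'lmass' column used further below.
_demo_logM = Mass(21.0, 1.2, 0.8)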
# Flux to magnitude conversion adopted by the UltraVISTA catalog
def FluxToMagnitude(flux, ap_corr):
return 25.0 - 2.5*np.log10(flux*ap_corr)
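# e.g. a catalog flux of 100 counts with ap_corr = 1.0 maps to
# 25.0 - 2.5*log10(100) = 20.0 mag on the UltraVISTA zero-point.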
#################### Extended Bootstrapping Technique (EBT) ####################
# --- EBT assembles new bins for stacking, rather than drawing from original bins
# Step 1: draw simulated redshifts from the photometric redshift probability distribution
# Step 2: estimate the mass using the perturbed redshift and observed K magnitude and J-K color
# Step 3: the simulated catalog is split into the original bins and new stacked flux densities are calculated
# Step 4: repeat Step 1-3 many (>1000) times to complete the "bootstrapping"
n_bt = 1000 # Number of bootstrap realizations
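# A minimal sketch of how Step 1 could be realised (values are made up;
# in practice the catalog's z_peak/l68/u68 columns would be used):
# perturb the photometric redshift with an asymmetric Gaussian built
# from the lower/upper 68% confidence bounds.
_rng = np.random.RandomState(0)
_z_peak, _l68, _u68 = 0.80, 0.75, 0.87
_draw = _rng.randn()
_z_sim = _z_peak + _draw * ((_u68 - _z_peak) if _draw > 0 else (_z_peak - _l68))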
path_cat = '/Desktop/Caltech_OBSCOS/DataCollection/simstack_catalogs/UVISTA/DR2/UVISTA_DR2_master_v2.1_USE.csv'
path_cat = _HOME + path_cat
col_to_read = ['ra','dec','z_peak','l68','u68','J','Ks','ap_corr','lmass','rf_U_V','rf_V_J']
df_cat_in = pd.read_csv(path_cat,usecols=col_to_read)
header_list = list(df_cat_in.columns.values)
cat_in = df_cat_in.as_matrix(columns=df_cat_in.columns)
n_sources = cat_in.shape[0]; n_params = cat_in.shape[1]
#print 'Size Read-In: ', cat_in.shape
c_z_peak = header_list.index('z_peak')
c_z_l68 = header_list.index('l68')
c_z_u68 = header_list.index('u68')
c_J = header_list.index('J')
c_Ks = header_list.index('Ks')
c_ap_corr = header_list.index('ap_corr')
c_lmass = header_list.index('lmass')
### Redshift of C[II] Signal ###
z_CII = 6.5
z_CO_32 = z_obs_CO(3,z_CII)
z_CO_43 = z_obs_CO(4,z_CII)
#if z_CII == 6.0:
# z_CO_32 = 0.27
#if z_CII == 6.5:
# z_CO_32 = 0.36
#elif z_CII == 7.0:
# z_CO_32 = 0.46
inds3 = np.where( (cat_in[:,c_z_peak]>=z_CO_32-0.01) & (cat_in[:,c_z_peak]<=z_CO_32+0.01) )[0]
#print '----- Ks-logM relation is estimated at z in [%.2f, %.2f] with %d galaxies -----' % (z_CO_32-0.01, z_CO_32+0.01, np.size(inds3))
xpts3 = cat_in[inds3,c_lmass]
ypts3 = FluxToMagnitude(cat_in[inds3,c_Ks], cat_in[inds3,c_ap_corr])
fit_coeff3 = np.polyfit(xpts3,ypts3,1)
def fit_Ks_logM_3(logM):
return fit_coeff3[1] + fit_coeff3[0] * logM
def fit_logM_Ks_3(Ks):
return (Ks - fit_coeff3[1]) / fit_coeff3[0]
#print 'J32: ', fit_Ks_logM_3(9.0)
#print 'J32: ', fit_logM_Ks_3(22.0)
#xss = np.linspace(8.,11.,100)
#plt.plot(xpts3, ypts3, 'b+')
#plt.plot(xss, fit_Ks_logM_3(xss), 'r-')
#plt.show()
#if z_CII == 6.0:
# z_CO_43 = 0.70
#if z_CII == 6.5:
# z_CO_43 = 0.82
#elif z_CII == 7.0:
# z_CO_43 = 0.94
inds4 = np.where( (cat_in[:,c_z_peak]>=z_CO_43-0.01) & (cat_in[:,c_z_peak]<=z_CO_43+0.01) )[0]
#print '----- Ks-logM relation is estimated at z in [%.2f, %.2f] with %d galaxies -----' % (z_CO_43-0.01, z_CO_43+0.01, np.size(inds4))
xpts4 = cat_in[inds4,c_lmass]
ypts4 = FluxToMagnitude(cat_in[inds4,c_Ks], cat_in[inds4,c_ap_corr])
fit_coeff4 = np.polyfit(xpts4,ypts4,1)
def fit_Ks_logM_4(logM):
return fit_coeff4[1] + fit_coeff4[0] * logM
def fit_logM_Ks_4(Ks):
return (Ks - fit_coeff4[1]) / fit_coeff4[0]
#print 'J43: ', fit_Ks_logM_4(9.0) | mit |
anthrotype/freetype-py | examples/glyph-outline.py | 3 | 1282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Glyph outline rendering
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
face = Face('./Vera.ttf')
face.set_char_size( 4*48*64 )
flags = FT_LOAD_DEFAULT | FT_LOAD_NO_BITMAP
face.load_char('S', flags )
slot = face.glyph
glyph = slot.get_glyph()
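# The stroker below traces the outline with a pen of radius 64 units
# (26.6 fixed point, i.e. about one pixel), turning the filled glyph
# into a hollow border before it is rendered to a bitmap.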
stroker = Stroker( )
stroker.set(64, FT_STROKER_LINECAP_ROUND, FT_STROKER_LINEJOIN_ROUND, 0 )
glyph.stroke( stroker )
blyph = glyph.to_bitmap(FT_RENDER_MODE_NORMAL, Vector(0,0))
bitmap = blyph.bitmap
width, rows, pitch = bitmap.width, bitmap.rows, bitmap.pitch
top, left = blyph.top, blyph.left
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
plt.figure(figsize=(6,8))
plt.imshow(Z, interpolation='nearest', cmap=plt.cm.gray_r, origin='lower')
plt.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/linear_model/tests/test_bayes.py | 3 | 3376 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn.linear_model import Ridge
from sklearn import datasets
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_bayesian_ridge_parameter():
# Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224)
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
br_model = BayesianRidge(compute_score=True).fit(X, y)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_return_std():
# Test return_std option for both Bayesian regressors
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X_test = np.random.random((n_test, d))
for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
y = f_noise(X, noise_mult)
m1 = BayesianRidge()
m1.fit(X, y)
y_mean1, y_std1 = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
y_mean2, y_std2 = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
| mit |
annayqho/TheCannon | code/lamost/mass_age/paper_plots/residuals_grid_CA.py | 1 | 2940 | import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from math import log10, floor
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
def round_sig(x, sig=2):
if x < 0:
return -round(-x, sig-int(floor(log10(-x)))-1)
return round(x, sig-int(floor(log10(x)))-1)
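# e.g. round_sig(0.012345, 3) -> 0.0123 and round_sig(-271.8) -> -270.0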
names = ['\mbox{T}_{\mbox{eff}},', '\mbox{log g}', '\mbox{[Fe/H]}']
units = ['\mbox{K}', '\mbox{dex}', '\mbox{dex}']
snr_str = [r'SNR $\textless$ 50', r'50 $\textless$ SNR $\textless$ 100', r'SNR $\textgreater$ 100']
snr_str = snr_str[::-1]
cutoffs = [0, 50, 100, 10000]
cutoffs = cutoffs[::-1]
y_highs = [300, 0.5, 0.3]
x_lows = [4000, 1.1, -2.0, -0.08]
x_highs = [5300, 3.8, 0.5, 0.4]
direc = "../run_9b_reddening"
all_cannon = np.load("%s/all_cannon_labels.npz" %direc)['arr_0']
all_ids = np.load("../run_2_train_on_good/all_ids.npz")['arr_0']
all_apogee = np.load("../run_2_train_on_good/all_label.npz")['arr_0']
good_id = np.load("%s/tr_id.npz" %direc)['arr_0']
all_snr = np.load("../run_2_train_on_good/SNRs.npz")['arr_0']
subset = np.array([np.where(all_ids==val)[0][0] for val in good_id])
apogee = all_apogee[subset]
cannon = all_cannon
snr = all_snr[subset]
fig = plt.figure(figsize=(15,15))
gs = gridspec.GridSpec(3,3, wspace=0.3, hspace=0)
props = dict(boxstyle='round', facecolor='white', alpha=0.3)
for i in range(0, len(names)):
name = names[i]
unit = units[i]
for j in range(0, len(cutoffs)-1):
ax = plt.subplot(gs[j,i])
ax.axhline(y=0, c='k')
#ax.legend(fontsize=14)
choose = np.logical_and(snr < cutoffs[j], snr > cutoffs[j+1])
diff = (cannon[:,i] - apogee[:,i])[choose]
scatter = round_sig(np.std(diff), 3)
bias = round_sig(np.mean(diff), 3)
ax.hist2d(
apogee[:,i][choose], diff, range=[[x_lows[i], x_highs[i]], [-y_highs[i], y_highs[i]]], bins=30, norm=LogNorm(), cmap="gray_r")
if j < len(cutoffs) - 2:
ax.get_xaxis().set_visible(False)
ax.locator_params(nbins=5)
ax.tick_params(axis='y', labelsize=20)
ax.tick_params(axis='x', labelsize=20)
if j == 0:
ax.set_title(r"$\Delta %s_{\mbox{C/L-A}}$ [%s]" %(name, unit), fontsize=30)
if j == 2:
ax.set_xlabel("$%s$ [%s] from APOGEE" %(name, unit), fontsize=20)
textstr1 = '%s' %(snr_str[j])
ax.text(0.05, 0.95, textstr1, transform=ax.transAxes,
fontsize=20, verticalalignment='top', bbox=props)
textstr2 = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax.text(0.05, 0.05, textstr2, transform=ax.transAxes,
fontsize=20, verticalalignment='bottom', bbox=props)
#ax.set_xlabel(r"APOGEE %s $(%s)$" %(name, unit))
#ax.set_ylabel(r"Cannon-LAMOST %s $(%s)$" %(name, unit))
plt.savefig("residuals_grid_5label.png")
#plt.show()
| mit |
dkillick/cartopy | lib/cartopy/examples/always_circular_stereo.py | 5 | 1313 | __tags__ = ['Lines and polygons']
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature
def main():
fig = plt.figure(figsize=[10, 5])
ax1 = plt.subplot(1, 2, 1, projection=ccrs.SouthPolarStereo())
ax2 = plt.subplot(1, 2, 2, projection=ccrs.SouthPolarStereo(),
sharex=ax1, sharey=ax1)
fig.subplots_adjust(bottom=0.05, top=0.95,
left=0.04, right=0.95, wspace=0.02)
# Limit the map to -60 degrees latitude and below.
ax1.set_extent([-180, 180, -90, -60], ccrs.PlateCarree())
ax1.add_feature(cartopy.feature.LAND)
ax1.add_feature(cartopy.feature.OCEAN)
ax1.gridlines()
ax2.gridlines()
ax2.add_feature(cartopy.feature.LAND)
ax2.add_feature(cartopy.feature.OCEAN)
# Compute a circle in axes coordinates, which we can use as a boundary
# for the map. We can pan/zoom as much as we like - the boundary will be
# permanently circular.
theta = np.linspace(0, 2*np.pi, 100)
center, radius = [0.5, 0.5], 0.5
verts = np.vstack([np.sin(theta), np.cos(theta)]).T
circle = mpath.Path(verts * radius + center)
ax2.set_boundary(circle, transform=ax2.transAxes)
plt.show()
if __name__ == '__main__':
main()
| lgpl-3.0 |
puchchi/stock_scraper_latest | MainDriver/TestingIndicatorData.py | 1 | 2175 | import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import pandas as pd
class kTestingIndicatorData:
def __init__(self, filename):
parser = lambda date: pd.datetime.strptime(date, '%Y-%m-%d')
self.df = pd.read_csv(filename, parse_dates = [0], date_parser = parser, index_col = "Date")
def CompileIndicators(self, delRows=0, delCols=[], trainingRows=0, trainingPer=0, trainingDataFName="", testingDataFName=""):
prevClose = pd.Series({0:0})
for i in range(self.df["Close"].count()-1):
prevClose = prevClose.append(pd.Series({i+1:self.df["Close"][i]}))
prevCloseDF = pd.DataFrame(prevClose, columns=["Previous Close"])
prevCloseDF.index = self.df.index
self.df = self.df.merge(prevCloseDF, left_index=True, right_index=True)
if delRows > 0:
self.df.drop(self.df.index[range(delRows)], inplace=True)
if delCols.__len__() > 0:
self.df.drop(delCols, axis=1, inplace=True)
trainingDF = pd.DataFrame()
testingDF = pd.DataFrame()
if trainingRows > 0:
trainingDF = self.df[:trainingRows]
testingDF = self.df[trainingRows+1:].copy()
elif trainingPer > 0:
totalRows = len(self.df.index)
trainingPerRows = int(totalRows * trainingPer / 100);
trainingDF = self.df[:trainingPerRows]
testingDF = self.df[trainingPerRows+1:].copy()
if len(trainingDataFName) > 0:
trainingDF.to_csv(trainingDataFName)
# Removing Close from testing data
testingDF.drop("Close", axis=1, inplace=True)
if len(testingDataFName) > 0:
testingDF.to_csv(testingDataFName)
if __name__ == "__main__":
testing = kTestingIndicatorData("data/niftyWithIndicatorsOriginal.csv")
testing.CompileIndicators(delRows=28, delCols=["Open", "High", "Low", "Volume", "+DI14", "-DI14","Signal Line", "MACD Histogram"], trainingRows=1500,
trainingDataFName="data/niftyTrainingData.csv", testingDataFName="data/niftyTestingData.csv")
| mit |
ltiao/scikit-learn | sklearn/tests/test_random_projection.py | 141 | 14040 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
# All random matrix should produce a transformation matrix
# with zero mean and unit norm for each columns
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
# Check some statical properties of Gaussian random matrix
# Check that the random matrix follow the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
# a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
# Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
# Check that the random matrix follows the proper distribution.
# Let's say that each element of a_{ij} of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/feature_selection/tests/test_feature_select.py | 143 | 22295 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: check that it does not raise casting
# errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| mit |
imcgreer/rapala | survey/nov2015data.py | 2 | 35181 | #!/usr/bin/env python
import os
import itertools
import numpy as np
from astropy.io import fits
from astropy.table import Table,join,vstack
from astropy.stats import sigma_clip
from astropy.wcs import WCS
import fitsio
import matplotlib.pyplot as plt
from matplotlib import ticker
from bokpipe.bokphot import aper_phot_image
from bass import ampNums,get_amp_index
import ps1cal
test_exptimes = np.array([100.]*6 + [25.,50.,100.,200.,400.])
def srcor(ra1,dec1,ra2,dec2,sep,return_sep=False):
from astropy.coordinates import SkyCoord,match_coordinates_sky
from astropy import units as u
c1 = SkyCoord(ra1,dec1,unit=(u.degree,u.degree))
c2 = SkyCoord(ra2,dec2,unit=(u.degree,u.degree))
idx,d2d,d3c = match_coordinates_sky(c1,c2)
ii = np.where(d2d.arcsec < sep)[0]
if return_sep:
return ii,idx[ii],d2d.arcsec[ii]
else:
return ii,idx[ii]
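# Typical use (sketch; cat1/cat2 stand for any tables with RA/Dec in
# degrees): match the two lists to within 2 arcsec,
# m1, m2 = srcor(cat1['ra'], cat1['dec'], cat2['ra'], cat2['dec'], 2.0)
# and pass return_sep=True to also get the matched separations.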
def s82_ra_slice(s82,ra_range):
return np.where((s82['ra']>ra_range[0]) & (s82['ra']<ra_range[1]))[0]
def load_stripe82_truth(ra_range=None):
s82 = fits.getdata(os.path.join(os.environ['DESITARGTRUTH'],
'stripe82-dr12-stars.fits.gz'))
if ra_range is not None:
s82 = s82[s82_ra_slice(s82,ra_range)]
return s82
def has_coverage(truth,objs):
return ( (truth['ra']>objs['ALPHA_J2000'].min()) &
(truth['ra']<objs['ALPHA_J2000'].max()) &
(truth['dec']>objs['DELTA_J2000'].min()) &
(truth['dec']<objs['DELTA_J2000'].max()) )
def load_obsdb():
return Table.read('../basschute/config/nov2015_mod.fits')
def stripe82_catalogs(bokdir,ccdsfile='bass-ccds-idmnov2015.fits'):
from boketc import k_ext
alltabs = []
mrows = []
fieldNum = 1
ccdlist = Table.read(ccdsfile)
# start with the Stripe 82 truth table as a base catalog
s82truth = load_stripe82_truth()
# iterate over the two S82 fields observed (Deep2F3 and a random spot)
for field,ra_range in zip(['s82cal%s_ra334_%s','deep2%s_ra352_%s'],
[(333.5,336.0),(351.0,353.5)]):
ii = s82_ra_slice(s82truth,ra_range)
s82 = s82truth[ii]
# initialize the table with Stripe 82 fields and do some renaming
# for convenience
t = Table(s82truth[ii],masked=True)
for cn in t.colnames:
cn_new = cn.replace('psfmagerr','err')
cn_new = cn_new.replace('psfmag','mag')
t.rename_column(cn,'sdss_'+cn_new)
t['sdss_idx'] = ii
# match the Bok catalogs
for k,dtyp in [('x','f4'),('y','f4'),('ra','f8'),('dec','f8'),
('idx','i4'),('mag','f4'),('err','f4'),
('field','i4'),('ccd','i4'),('amp','i4'),
('kx','f4'),('apcorr','f4')]:
for b in 'gr':
t['bok_'+k+'_'+b] = np.zeros((len(ii),11),dtype=dtyp)
for b in 'gr':
t['bok_mag_'+b].mask = True
for n,b in itertools.product(range(11),'gr'):
sfx = '123456abcde'[n]
fieldName = field % (b,sfx)
catf = os.path.join(bokdir,fieldName+'.cat.fits')
try:
cat = fits.open(catf)
except IOError:
continue
for ccdNum in range(1,5):
objs = cat[ccdNum].data
ii = np.where(has_coverage(s82,objs))[0]
m1,m2,d = srcor(objs['ALPHA_J2000'],objs['DELTA_J2000'],
s82['ra'][ii],s82['dec'][ii],
2.0,return_sep=True)
print ' %20s[%d] matched %4d out of %4d (%.2f)' % \
(fieldName,ccdNum,len(m1),len(ii),np.median(d))
t['bok_idx_'+b][ii[m2],n] = objs['NUMBER'][m1]
t['bok_field_'+b][ii[m2],n] = fieldNum
t['bok_ccd_'+b][ii[m2],n] = ccdNum
ampIndex = get_amp_index(objs['X_IMAGE'][m1],
objs['Y_IMAGE'][m1])
t['bok_amp_'+b][ii[m2],n] = 4*(ccdNum-1) + ampIndex + 1
for k1,k2 in [('x','X_IMAGE'),('y','Y_IMAGE'),
('ra','ALPHA_J2000'),('dec','DELTA_J2000')]:
t['bok_'+k1+'_'+b][ii[m2],n] = objs[k2][m1]
flux = np.ma.array(objs['FLUX_APER'][m1,1],
mask=((objs['FLUX_APER'][m1,1]<=0) |
(objs['FLAGS'][m1]>0)))
fluxerr = objs['FLUXERR_APER'][m1,1]
mag = -2.5*np.ma.log10(flux/test_exptimes[n])
t['bok_mag_'+b][ii[m2],n] = mag
t['bok_mag_'+b][ii[m2],n].mask = mag.mask
t['bok_err_'+b][ii[m2],n] = 1.0856*np.ma.divide(fluxerr,flux)
# meta-data about the image
totflux = objs['FLUX_APER'][m1,-1]
apcorr = objs['FLUX_APER'][m1] / totflux[:,np.newaxis]
psfcorr = objs['FLUX_PSF'][m1] / totflux
fwhm = np.median(objs['FWHM_IMAGE'][m1])
apcorr = sigma_clip(apcorr[:,1]).mean()
ccdent = np.where((ccdlist['image_filename'] ==
fieldName+'.fits') &
(ccdlist['ccdnum']==ccdNum))[0][0]
airmass = ccdlist['airmass'][ccdent]
mrows.append((fieldName,fieldNum,b,ccdNum,fwhm,apcorr,
airmass,ccdlist['avsky'][ccdent],
ccdlist['arawgain'][ccdent]))
# copy corrections into photo tab
t['bok_kx_'+b][ii[m2],n] = k_ext[b]*(airmass-1)
t['bok_apcorr_'+b][ii[m2],n] = 2.5*np.log10(apcorr)
if b=='r':
fieldNum += 1
# eliminate entries without Bok coverage in both bands
ii = np.where(~(t['bok_mag_g'].mask.all(axis=1) &
t['bok_mag_r'].mask.all(axis=1)))[0]
t = t[ii]
# now bring in PS1
for k,dtyp in [('ra','f8'),('dec','f8'),('idx','i4')]:
t['ps1_'+k] = np.zeros(len(t),dtype=dtyp)
for j,b in enumerate('grizy'):
t['ps1_mag_'+b] = np.zeros(len(t),dtype='f4')
ps1objs = ps1cal.get_ps1_stars(t['sdss_ra'],t['sdss_dec'])
m1,m2 = srcor(ps1objs['RA'],ps1objs['DEC'],
t['sdss_ra'],t['sdss_dec'],2.0)
for k1,k2 in [('ra','RA'),('dec','DEC'),('idx','OBJ_ID')]:
t['ps1_'+k1][m2] = ps1objs[k2][m1]
for j,b in enumerate('grizy'):
t['ps1_mag_'+b][m2] = ps1objs['MEDIAN'][m1,j]
#
alltabs.append(t)
# combine tables & calculate residual per-amplifier offsets
tab = vstack(alltabs)
for b in 'gr':
mag = tab['bok_mag_'+b] - tab['bok_kx_'+b] + tab['bok_apcorr_'+b]
mivar = np.ma.power(tab['bok_err_'+b],-2)
mean_mag = np.ma.average(sigma_clip(mag,axis=1),weights=mivar,axis=1)
tab['bok_meanmag_'+b] = mean_mag
tab['bok_ampcorr_'+b] = np.zeros_like(tab['bok_mag_'+b])
print '\n%s band amp correction (millimag): ' % b
for amp in range(1,17):
			amp_mags = np.ma.array(mag,mask=(mag.mask|(tab['bok_amp_'+b]!=amp)))
amp_dmag = sigma_clip(mean_mag[:,np.newaxis] - amp_mags)
ii = np.where(tab['bok_amp_'+b] == amp)
tab['bok_ampcorr_'+b][ii] = amp_dmag.mean()
tab.meta['AMPC%s%02d'%(b.upper(),amp)] = amp_dmag.mean()
print '%4d +/- %2d ' % (1e3*amp_dmag.mean(),1e3*amp_dmag.std()),
if (amp%4)==0: print
print
mag += tab['bok_ampcorr_'+b]
tab['bok_magcorr_'+b] = mag
mean_mag = np.ma.average(sigma_clip(mag,axis=1),weights=mivar,axis=1)
tab['bok_meanmagcorr_'+b] = mean_mag
# something weird here
for k in ['mag','magcorr','ampcorr']:
for b in 'gr':
tab['bok_'+k+'_'+b].fill_value = np.nan
tab.write('stripe82bok_nov2015stars.fits',overwrite=True)
#
metaTab = Table(rows=mrows,
names=('field','fieldNum','filter','ccdNum',
'fwhm','apCorr','airmass','skyADU','gain'))
metaTab.write('stripe82bok_nov2015sum.fits',overwrite=True)
def load_nov2015_data():
t = Table.read('stripe82bok_nov2015stars.fits')
# multi-dim Table columns written to FITS lose their masks...
for k in ['mag','magcorr','meanmag','meanmagcorr']:
for b in 'gr':
t['bok_%s_%s'%(k,b)].mask = np.isnan(t['bok_%s_%s'%(k,b)])
for b in 'grizy':
t['ps1_mag_'+b].mask = t['ps1_mag_'+b]==0
return t
def calc_color_terms(doplots=False,savefit=False):
t = load_nov2015_data()
n = t['bok_mag_g'].shape[1]
## refclr = {'sdss':np.tile(t['sdss_mag_g']-t['sdss_mag_z'],(n,1)).transpose(),
## 'ps1':np.tile(t['ps1_mag_g']-t['ps1_mag_i'],(n,1)).transpose()}
refclr = {'sdss':t['sdss_mag_g']-t['sdss_mag_i'],
'ps1':t['ps1_mag_g']-t['ps1_mag_i']}
refs = ['sdss','ps1']
for b in 'gr':
refclr_all = {'sdss':[],'ps1':[]}
dmag_all = {'sdss':[],'ps1':[]}
mag = t['bok_meanmagcorr_'+b]
for ref in refs:
refmag = t[ref+'_mag_'+b]
dmag = sigma_clip(mag-refmag)
zp0 = dmag.mean()
bokmag = mag - zp0
ii = np.where(~dmag.mask)
dmag_all[ref].append(np.array(bokmag-refmag)[ii])
refclr_all[ref].append(np.array(refclr[ref][ii]))
# iteratively fit a polynomial of increasing order to the
# magnitude differences
cterms = {}
for ref in refs:
_dmag = np.concatenate(dmag_all[ref]).flatten()
_refclr = np.concatenate(refclr_all[ref]).flatten()
mask = np.abs(_dmag) > 0.25
for iternum in range(3):
order = iternum+1
tmp_dmag = np.ma.array(_dmag,mask=mask)
fit = np.ma.polyfit(_refclr,tmp_dmag,order)
magoff = sigma_clip(_dmag-np.polyval(fit,_refclr))
mask = magoff.mask
cterms[ref] = fit
if savefit:
_fit = fit.copy()
# remove the lowest order term (absorbed by zeropoint)
_fit[-1] = 0
np.savetxt('config/bok2%s_%s_gicoeff.dat'%(ref,b),_fit)
if doplots:
for ref in refs:
_dmag = np.concatenate(dmag_all[ref])
_refclr = np.concatenate(refclr_all[ref])
fig = plt.figure(figsize=(10,6))
fig.subplots_adjust(0.09,0.1,0.98,0.92)
ax1 = fig.add_subplot(211)
#plt.scatter(_refclr,_dmag,
# s=3,c='0.2',edgecolor='none')
ax1.hexbin(_refclr,_dmag,
cmap=plt.get_cmap('gray_r'),bins='log')
ax1.axhline(0,c='c')
xx = np.linspace(-1,5,100)
ax1.plot(xx,np.polyval(cterms[ref],xx),c='r')
try:
oldcterms = np.loadtxt('config/bok2%s_%s_gicoeff.dat' %
(ref,b))
yoff = cterms[ref][-1]
ax1.plot(xx,yoff+np.polyval(oldcterms,xx),c='orange')
except:
pass
ax1.set_ylabel('%s(Bok) - %s(%s)'%(b,b,ref.upper()))
ax1.set_ylim(-0.25,0.25)
order = len(cterms[ref])-1
clrstr = {'sdss':'gi','ps1':'gi'}[ref]
polystr = ' '.join(['%+.5f*%s^%d'%(c,clrstr,order-d)
for d,c in enumerate(cterms[ref])])
ax1.set_title(('$%s(Bok) - %s(%s) = '%(b,b,ref.upper())) +
polystr+'$',size=14)
ax2 = fig.add_subplot(212)
ax2.hexbin(_refclr,_dmag-np.polyval(cterms[ref],_refclr),
cmap=plt.get_cmap('gray_r'),bins='log')
ax2.axhline(0,c='r')
ax2.set_ylim(-0.15,0.15)
for ax in [ax1,ax2]:
if ref=='sdss':
ax.axvline(0.4,c='b')
ax.axvline(3.7,c='b')
ax.set_xlabel('SDSS g-i')
ax.set_xlim(-0.05,3.9)
else:
ax.axvline(0.4,c='b')
ax.axvline(2.7,c='b')
ax.set_xlabel('PS1 g-i')
ax.set_xlim(-0.05,2.8)
ax.axvline(0,c='MidnightBlue')
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.02))
fig.savefig('%s_%s_to_bok_gicolors.png'%(b,ref))
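# A self-contained sketch (synthetic inputs, not the survey photometry) of the
# iterative fit used in calc_color_terms above: the polynomial order is raised
# on each pass and the residuals are sigma-clipped before refitting.
def _sketch_iterative_polyfit(refclr,dmag,niter=3):
	mask = np.zeros(len(dmag),dtype=bool)
	fit = None
	for iternum in range(niter):
		order = iternum+1
		tmp_dmag = np.ma.array(dmag,mask=mask)
		fit = np.ma.polyfit(refclr,tmp_dmag,order)
		resid = sigma_clip(dmag-np.polyval(fit,refclr))
		mask = resid.mask
	return fit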
def calc_zeropoints(ref='ps1'):
t = load_nov2015_data()
refclr = {'sdss':t['sdss_mag_g']-t['sdss_mag_z'],
'ps1':t['ps1_mag_g']-t['ps1_mag_i']}[ref]
zp = np.zeros((12,2,16))
for j,b in enumerate('gr'):
coeff = np.loadtxt('config/bok2%s_%s_coeff.dat'%(ref,b))
refmag = t[ref+'_mag_'+b] + np.polyval(coeff,refclr.filled(0))
refmag = refmag[:,np.newaxis]
# find the zeropoint offsets for each image and amplifier independently
for k,amp in enumerate(range(1,17)):
is_amp = t['bok_amp_'+b]==amp
for field in range(2):
# split the two fields
if field==0:
is_field = t['sdss_ra'] > 345
else:
is_field = t['sdss_ra'] < 345
mag = t['bok_mag_'+b].copy()
mag.mask |= refclr.mask[:,np.newaxis]
mag.mask[~(is_field[:,np.newaxis]&is_amp)] = True
dmag = sigma_clip(refmag-mag,axis=0)
if field==0:
zp[:6,j,k] = dmag.mean(axis=0)
else:
zp[6:,j,k] = dmag.mean(axis=0)
# np.savez('foo',zpEl=zp,zpADU=...,gain=)
return zp
# TODO convert zps back to ADU and apply nominal ext corr
def dump_zeropoints(fieldname,band,byamp=False,showADU=True,**kwargs):
from boketc import k_ext
ampn = np.array(ampNums)
tab = Table.read(fieldname+'_merged.fits')
zpim,zpccd,zpamp = calc_zeropoints(tab,band,**kwargs)
bokdir = os.path.join(os.environ['BASSRDXDIR'],'reduced',
'bokpipe_v0.2','nov15data')
if showADU:
obsdb = load_obsdb()
fname = fieldname.replace('r_','bokr_')
airmass = np.array([ obs['airmass'] for obs in obsdb
if obs['objName'].startswith(fname) and
obs['good'] ])
zp0adu = zpim - 2.5*np.log10(1.375) - k_ext[band]*(airmass-1)
for j in range(len(zpim)):
print 'image %2d: ' % (j+1),
for ccdNum in range(1,5):
print '%6.3f ' % zpccd[j,ccdNum-1],
if byamp:
print
for ccdNum in range(1,5):
print ' ',
for ai in range(4):
print 'IM%-2d %6.3f ' % \
(ampn[ccdNum-1][ai],zpamp[j,ccdNum-1,ai]),
print
print ' %6.3f %6.3f' % (zpim[j],zp0adu[j])
print
print ' average zeropoints: %6.3f %6.3f' % (zpim.mean(),zp0adu.mean())
if byamp and showADU:
from bokpipe.bokproc import nominal_gain,ampOrder
# to derive per-amp zeropoints using applied gain corrections
gaindat = np.load(os.path.join(bokdir,'..','diagnostics',
'gainbal_20151112_%s.npz'%{'g':'g','r':'bokr'}[band]))
gain = nominal_gain * np.product(gaindat['gainCor'],axis=0)
# reorder arrays to match ampNumber
ampgain = nominal_gain[np.array(ampOrder)-1][ampn-1]
deltazp = zpim.mean() - zpamp
ampdelta = deltazp.mean(axis=0)
extcorr = (k_ext[band]*(airmass-1))[:,np.newaxis,np.newaxis]
zp0adu_amp = zpamp - 2.5*np.log10(ampgain) - extcorr + ampdelta
zp0adu_amp = zp0adu_amp.mean(axis=0)
print
print ' per-amplifier zeropoints in ADU:'
for ccdNum in range(1,5):
for ai in range(4):
print ' IM%-2d %6.3f ' % \
(ampn[ccdNum-1][ai],zp0adu_amp[ccdNum-1,ai]),
print
print
_zp = zp0adu_amp.flatten()
_ii = _zp.argsort()
print 'sorted by zeropoint(ADU):'
for _i in _ii:
print ' IM%-2d %6.3f' % (ampn.flatten()[_i],_zp[_i])
print ' median is %6.3f' % np.median(zp0adu_amp)
print
def delta_zps_byamp(zpFit):
for ccdi in range(4):
ccdmean = np.mean(zpFit[:,ccdi,:])
for ampj in range(4):
ampmean = np.mean(zpFit[:,ccdi,ampj])
print 'IM%d %8.4f' % (ampNums[ccdi][ampj],ccdmean-ampmean)
def dump_all_zeropoints():
for fpfx,fsfx in [('s82cal','ra334'),('deep2','ra352')]:
for filt in ['g','r']:
fieldcatf = fpfx+filt+'_'+fsfx+'_merged.fits'
fieldcat = Table.read(fieldcatf)
print fieldcatf
dump_zeropoints(fieldcat,filt[-1])
print
print
def check_scatter(_tab,band,j=None,colorcorr=False):
from scipy.stats import scoreatpercentile
piles = [25,50,75,90,95]
for i in range(2):
if i==0:
tab = _tab[_tab['sdss_ra']<340]
print '\nStripe 82 RA=334'
else:
tab = _tab[_tab['sdss_ra']>340]
print '\nDeep2F3'
print '%8s ' % 'mag',
print ' -%2d%%- '*len(piles) % tuple(piles)
for mag1 in np.arange(17,21.1,0.5):
is_mag = ( (tab['sdss_mag_'+band]>(mag1-0.25)) &
(tab['sdss_mag_'+band]<(mag1+0.25)) )
if j is None:
mag = tab['bok_meanmagcorr_'+band]
else:
mag = tab['bok_magcorr_'+band][:,j]
ii = np.where(~mag.mask & is_mag)[0]
dm = mag[ii] - tab['sdss_mag_'+band][ii]
dist = scoreatpercentile(np.abs(dm-np.median(dm)),piles)
print '%8.1f ' % mag1,
print '%6d '*len(piles) % tuple(1e3*dist)
print
def get_zeropoints(tab,zps,zptype):
zpIm,zpCCD,zpAmp = zps
if zptype=='image':
zp = zpIm[:,np.newaxis]
elif zptype=='ccd':
# add zp=0 for ccd=0 (empty entries)
zp = np.hstack([np.zeros((zpCCD.shape[0],1)),zpCCD])
zp = np.choose(tab['ccdNum'],zp.transpose())[:,:,np.newaxis]
elif zptype=='amp':
ampIndex = get_amp_index(tab['X_IMAGE'],tab['Y_IMAGE'])
ai = np.clip(4*(tab['ccdNum']-1) + ampIndex + 1, 0, 16)
zp = np.hstack([np.zeros((zpAmp.shape[0],1)),zpAmp.reshape(-1,16)])
zp = np.choose(ai,zp.transpose())[:,:,np.newaxis]
return zp
def focalplanevar(tab,band,nbin=4,doplot=False,vr=0.015,shownum=False,
zpcorr=None):
apNum = 2
if zpcorr is None:
zp = None
else:
zps = calc_zeropoints(tab,band,apNum=apNum)
zp = get_zeropoints(tab,zps,zpcorr)
aperMag = flux2mag(tab,band,'APER',zp=zp)[:,:,apNum]
refMag = tab['tMag']
meanMag = sigma_clip(aperMag,axis=1).mean(axis=1)
deltaMag = aperMag - meanMag[:,np.newaxis]
	deltaMag[tab['tMag']>19,:] = np.ma.masked
nx = 4096 // nbin
ny = 4032 // nbin
xi = (tab['X_IMAGE']/nx).astype(np.int32)
yi = (tab['Y_IMAGE']/ny).astype(np.int32)
fpIm = np.zeros((4,nbin,nbin))
for ccdi in range(4):
for i in range(nbin):
for j in range(nbin):
ii = np.where((tab['ccdNum']==ccdi+1)&(yi==i)&(xi==j))
dm = sigma_clip(deltaMag[ii])
if shownum:
fpIm[ccdi,i,j] = (~dm.mask).sum()
else:
fpIm[ccdi,i,j] = np.ma.mean(dm)
if doplot:
fig = plt.figure(figsize=(6,6.15))
plt.subplots_adjust(0.04,0.035,0.96,0.88,0.25,0.12)
for pnum,ccdi in enumerate([0,2,1,3],start=1):
ax = plt.subplot(2,2,pnum)
im = fpIm[ccdi]
if ccdi <= 1:
im = im[:,::-1]
if ccdi % 2 == 1:
im = im[::-1,:]
if shownum:
_im = ax.imshow(im,origin='lower',interpolation='nearest',
cmap=plt.cm.hot_r)
else:
_im = ax.imshow(im,origin='lower',interpolation='nearest',
vmin=-vr,vmax=vr,cmap=plt.cm.RdBu)
plt.title('CCD%d'%(ccdi+1))
if pnum==1:
cbax = fig.add_axes([0.1,0.98,0.8,0.015])
cb = fig.colorbar(_im,cax=cbax,orientation='horizontal')
if not shownum:
cb.locator = ticker.MultipleLocator(0.005)
cb.update_ticks()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
return fpIm
def stripe82zps():
s82g = Table.read('s82calg_ra334_merged.fits')
s82r = Table.read('s82calr_ra334_merged.fits')
plt.ioff()
for filt,tab in [('g',s82g),('r',s82r)]:
for zpcorr in ['image','ccd','amp']:
fpim = focalplanevar(tab,filt,doplot=True,nbin=8,vr=0.015,
zpcorr=zpcorr)
plt.savefig('s82zp_%s_%s.png' % (filt,zpcorr))
plt.ion()
def stripe82_seeing():
bokdir = os.path.join(os.environ['BASSRDXDIR'],'nov15data')
for filt in 'gr':
fields = ['s82cal%s_ra334_%s' % (filt,n) for n in '123456abcde']
for field in fields:
cat = fits.getdata(os.path.join(bokdir,filt,field+'.cat.fits'))
ii = np.where((cat['FWHM_IMAGE']<10)&(cat['FLAGS']==0))[0]
fwhm = np.ma.median(sigma_clip(cat['FWHM_IMAGE'][ii]))[0]
print '%s %.2f %.2f' % (field,fwhm,fwhm*0.455)
def stripe82_aperphot_all(which='s82cal',redux='naoc',**kwargs):
rdxdir = os.path.join(os.environ['BASSRDXDIR'],'reduced')
if redux=='naoc':
bokdir = os.path.join(rdxdir,'nov15data')
elif redux=='naocv1':
bokdir = os.path.join(rdxdir,'nov15data_OLD')
elif redux=='idm':
bokdir = os.path.join(rdxdir,'bokpipe_v0.2','nov15data')
elif redux=='noao':
bokdir = os.path.join(rdxdir,'noaocp','nov15data')
else:
raise ValueError
aperRad = kwargs.pop('aperRad',7.0) / 0.455 # asec -> pix
if which=='s82cal':
s82cat = Table(load_stripe82_truth(ra_range=(332,336)),masked=True)
nfields = 11
fname = 's82cal%s_ra334_%s'
else:
s82cat = Table(load_stripe82_truth(ra_range=(349,354)),masked=True)
nfields = 6
fname = 'deep2%s_ra352_%s'
exptime = np.array([100.]*6 + [25.,50.,100.,200.,400.])[:nfields]
cols = ['x','y','counts','countsErr','peakCounts','flags','ccdNum']
for k in cols:
		dtype = np.float32 if k not in ['flags','ccdNum'] else np.int32
s82cat[k] = np.zeros((len(s82cat),nfields,2),dtype=dtype)
for i in range(nfields):
for j,filt in enumerate('gr'):
field = fname % (filt,'123456abcde'[i])
try:
print 'calculating aperture phot for ',field,
imf = os.path.join(bokdir,field+'.fits')
bpmask = None #fitsio.FITS(imf.replace('.fits','.wht.fits'))
ph = aper_phot_image(imf,s82cat['ra'],s82cat['dec'],[aperRad],
badPixMask=bpmask,calc_peak=True)
print
except IOError:
print '--> MISSING'
continue
for k in cols:
s82cat[k][ph['objId'],i,j] = ph[k].squeeze()
# objects with no coverage have ccdNum==0 in all frames/filters
ii = np.where(s82cat['ccdNum'].sum(axis=-1).sum(axis=-1)>0)[0]
s82cat = s82cat[ii]
missing = s82cat['ccdNum']==0
for k in cols:
s82cat[k].mask |= missing
s82cat['ampIndex'] = get_amp_index(s82cat['x'],s82cat['y'])
s82cat['ampNum'] = 4*(s82cat['ccdNum']-1) + s82cat['ampIndex'] + 1
if redux=='idm':
# units of image are electrons
s82cat['cps'] = s82cat['counts'] / exptime[np.newaxis,:,np.newaxis]
else:
# units of image are electrons/s
s82cat['cps'] = s82cat['counts'].copy()
s82cat['counts'] = s82cat['cps'] * exptime[np.newaxis,:,np.newaxis]
s82cat['countsErr'] *= exptime[np.newaxis,:,np.newaxis]
s82cat['meanCps'] = np.average(s82cat['cps'],weights=exptime,axis=1)
s82cat['snr'] = s82cat['counts']/s82cat['countsErr']
s82cat['nobs'] = (~s82cat['counts'].mask).sum(axis=1)
ctsratio = np.ma.divide(s82cat['cps'],s82cat['meanCps'][:,np.newaxis,:])
s82cat['dmag'] = -2.5*np.ma.log10(ctsratio)
s82cat['sigMag'] = np.ma.std(s82cat['dmag'],axis=1)
return s82cat
def selfcal(s82cat):
exptime = np.array([100.]*6 + [25.,50.,100.,200.,400.])
ii = np.where(np.any(s82cat['nobs']>6,axis=1))[0]
imgcal = sigma_clip(s82cat['dmag'][ii],
sigma=2.0,iters=3,axis=0).mean(axis=0)
print imgcal
cscl = 10**(-0.4*imgcal)
s82cat['cpsCal'] = s82cat['cps']*cscl
s82cat['meanCpsCal'] = np.average(s82cat['cpsCal'],
weights=exptime,axis=1)
ctsratio = np.ma.divide(s82cat['cpsCal'],
s82cat['meanCpsCal'][:,np.newaxis,:])
s82cat['dmagCal'] = -2.5*np.ma.log10(ctsratio)
s82cat['dmagCal'] -= sigma_clip(s82cat['dmagCal'][ii],
sigma=2.0,iters=3,axis=0).mean(axis=0).mean(axis=0)
s82cat['sigMagCal'] = np.ma.std(s82cat['dmagCal'],axis=1)
return s82cat
def sdsscal(s82cat,mode='amp'):
exptime = np.array([100.]*6 + [25.,50.,100.,200.,400.])
nimg = len(exptime)
bokmag = -2.5*np.ma.log10(s82cat['cps']/exptime[:,np.newaxis])
s82cat['dmagExt'] = 0*s82cat['dmag']
s82cat['dmagExt'] = np.ma.masked
gcterms = np.loadtxt('config/bok2sdss_g_gicoeff.dat')
rcterms = np.loadtxt('config/bok2sdss_r_gicoeff.dat')
gminusi = s82cat['psfmag_g']-s82cat['psfmag_i']
sdssmag = {'g':s82cat['psfmag_g']+np.polyval(gcterms,gminusi),
'r':s82cat['psfmag_r']+np.polyval(rcterms,gminusi)}
if mode=='amp':
zp = np.ma.zeros((nimg,2,16))
for amp in range(1,17):
isamp = s82cat['ampNum'] == amp
for j in range(nimg):
for k in range(2):
b = 'gr'[k]
good = s82cat['nobs'][:,k] >= 1
ii = np.where(good & isamp[:,j,k])[0]
if len(ii)==0:
zp[j,k,amp-1] = np.ma.masked
continue
dmag = np.ma.subtract(bokmag[ii,j,k],sdssmag[b][ii])
zp[j,k,amp-1] = np.ma.median(sigma_clip(dmag,sigma=2.0))
s82cat['dmagExt'][ii,j,k] = dmag - zp[j,k,amp-1]
s82cat['dmagExt'][ii,j,k].mask |= dmag.mask
elif mode=='ccd':
zp = np.ma.zeros((nimg,2,4))
for ccd in range(1,5):
isccd = s82cat['ccdNum'] == ccd
for j in range(nimg):
for k in range(2):
b = 'gr'[k]
good = s82cat['nobs'][:,k] >= 1
ii = np.where(good & isccd[:,j,k])[0]
if len(ii)==0:
zp[j,k,ccd-1] = np.ma.masked
continue
dmag = np.ma.subtract(bokmag[ii,j,k],sdssmag[b][ii])
zp[j,k,ccd-1] = np.ma.median(sigma_clip(dmag,sigma=2.0))
s82cat['dmagExt'][ii,j,k] = dmag - zp[j,k,ccd-1]
s82cat['dmagExt'][ii,j,k].mask |= dmag.mask
elif mode=='image':
raise ValueError
else:
raise ValueError
s82cat['sigMagExt'] = np.ma.std(s82cat['dmagExt'],axis=1)
return s82cat,zp
def focalplanevar2(s82cat,band,nbin=4,frameno=None,dmk='dmag',
doplot=False,vr=0.015,shownum=False):
tab = s82cat
bk = 'gr'.find(band)
nx = 4096 // nbin
ny = 4032 // nbin
if frameno is None:
s = np.s_[:,:,bk]
else:
s = np.s_[:,frameno,bk]
xi = (tab['x'][s]/nx).astype(np.int32)
yi = (tab['y'][s]/ny).astype(np.int32)
fpIm = np.zeros((4,nbin,nbin))
for ccdi in range(4):
for i in range(nbin):
for j in range(nbin):
ii = np.where((tab['ccdNum'][s]==ccdi+1)&(yi==i)&(xi==j))
dm = sigma_clip(s82cat[dmk][s][ii])
if shownum:
fpIm[ccdi,i,j] = (~dm.mask).sum()
else:
fpIm[ccdi,i,j] = np.ma.mean(dm)
if doplot:
if vr is None:
vmin,vmax = None,None
else:
try:
vmin,vmax = vr
except:
vmin,vmax = -vr,vr
fig = plt.figure(figsize=(6,6.15))
plt.subplots_adjust(0.04,0.035,0.96,0.88,0.25,0.12)
for pnum,ccdi in enumerate([0,2,1,3],start=1):
ax = plt.subplot(2,2,pnum)
im = fpIm[ccdi]
if ccdi <= 1:
im = im[:,::-1]
if ccdi % 2 == 1:
im = im[::-1,:]
if shownum:
_im = ax.imshow(im,origin='lower',interpolation='nearest',
cmap=plt.cm.hot_r)
else:
_im = ax.imshow(im,origin='lower',interpolation='nearest',
vmin=vmin,vmax=vmax,cmap=plt.cm.RdBu)
plt.title('CCD%d'%(ccdi+1))
if pnum==1:
cbax = fig.add_axes([0.1,0.98,0.8,0.015])
cb = fig.colorbar(_im,cax=cbax,orientation='horizontal')
#if not shownum:
# cb.locator = ticker.MultipleLocator(0.005)
#cb.update_ticks()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
return fpIm
def stripe82_linearity_plot(s82tab,filt='g',peak=False):
from scipy.stats import scoreatpercentile
from astrotools.idmstuff import binmean
countsk = 'counts' if not peak else 'peakCounts'
exptime = np.array([25.,50.,100.,200.,400.])
magrange = (s82tab['psfmag_'+filt]>17) & (s82tab['psfmag_'+filt]<22)
plt.figure(figsize=(8.5,9.5))
plt.subplots_adjust(0.10,0.06,0.96,0.98,0.02,0.02)
bk = 'gr'.find(filt)
ii = np.where(s82tab[countsk].mask[:,-5:,bk].sum(axis=1)==0)[0]
counts = s82tab[countsk][ii,-5:,bk].filled(-1)
cps = s82tab['cps'][ii,-5:,bk].filled(-1)
mean_cps = s82tab['meanCps'][ii,bk].filled(-1)
amps = s82tab['ampNum'][ii,-1,bk].filled(-1) # just use last im
flags = s82tab['flags'][ii,-5:,bk].filled(9999)
magrange = magrange[ii]
expected_cts = mean_cps[:,np.newaxis] * exptime
for ampNum in range(1,17):
ax = plt.subplot(8,2,ampNum)
ii = np.where((amps==ampNum) & magrange & (mean_cps>0) &
np.all(flags==0,axis=1))[0]
xall,yall = [],[]
for j in range(len(exptime)):
ctsratio = counts[ii,j]/expected_cts[ii,j]
plt.scatter(np.log10(mean_cps[ii]),ctsratio,
s=7,c='gray',edgecolor='none')
xall.append(np.log10(mean_cps[ii]))
yall.append(ctsratio)
xall = np.concatenate(xall)
yall = np.concatenate(yall)
plt.axhline(1.0,c='r')
xr = scoreatpercentile(xall,[5,99])
xb,yb,yv = binmean(xall,yall,
np.linspace(xr[0],xr[1],10),std=True,median=True,
clip=True)
plt.errorbar(xb,yb,yv,fmt='ks',ms=4)
if countsk=='counts':
plt.xlim(1.4,3.4)
plt.ylim(0.921,1.079)
else:
plt.xlim(1.0,2.4)
plt.ylim(0.89,1.11)
ax.yaxis.set_major_locator(ticker.MultipleLocator(0.04))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.01))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))
if ampNum % 2 == 0:
ax.yaxis.set_ticks([])
if ampNum < 15:
ax.xaxis.set_ticks([])
plt.text(3.15,1.05,'IM%d'%ampNum)
plt.figtext(0.5,0.01,'$log(<flux>)$',size=14,ha='center')
plt.figtext(0.01,0.5,r'$flux / <flux>$',size=14,va='center',
rotation='vertical')
def rename_proc_files(which='naoc'):
rdxdir = os.environ['BASSRDXDIR']
if which=='naoc':
bokdir = os.path.join(rdxdir,'reduced','20151111')
outdir = os.path.join(rdxdir,'reduced','nov15data')
elif which=='noao':
bokdir = os.path.join(rdxdir,'BOK_CP','CP20151111V0')
outdir = os.path.join(rdxdir,'reduced','noaocp','nov15data')
else:
raise ValueError
if not os.path.exists(outdir):
os.makedirs(outdir)
log = Table.read('bass_Nov2015toFeb2016.fits')
ii = np.where(log['utDate']=='20151112')[0]
for i in ii:
for sfx in ['','.wht']:
field = log['objName'][i]
if not (field.startswith('cosmos') or field.startswith('deep2')
or field.startswith('s82cal')):
continue
outfn = os.path.join(outdir,field.replace('bokr','r')+sfx+'.fits')
if os.path.exists(outfn): continue
filt = log['filter'][i]
			if which=='naoc':
filt = filt.replace('bokr','r')
if which=='noao':
nsfx = 'oow' if sfx=='.wht' else 'ooi'
_fn = log['fileName'][i].replace('ori',nsfx)+'_%s_v0' % filt
infn = os.path.join(bokdir,_fn)+'.fits.fz'
os.system('funpack -O %s %s' % (outfn,infn))
continue
# for NAOC images combine 4 CCDs into single MEF file
d = log['DTACQNAM'][i].split('.')
hdus = [fits.PrimaryHDU()]
try:
for ccd in range(1,5):
fn = 'p'+d[0][1:]+filt+d[1]+'_%d%s.fits' % (ccd,sfx)
im,hdr = fits.getdata(os.path.join(bokdir,fn),header=True)
for k in ['CTYPE1','CTYPE2']:
hdr[k] = hdr[k].replace('TAN','TPV')
hdus.append(fits.ImageHDU(im,hdr,'CCD%d'%ccd))
hdul = fits.HDUList(hdus)
hdul.writeto(outfn)
except IOError:
pass
def plot_pointings():
from matplotlib.patches import Rectangle
from astrotools.idmstuff import radec_fromstr
filt = 'g'
#fields = ['cosmos%s_ra150_%s' % (filt,n) for n in '123456']
fields = ['deep2%s_ra352_%s' % (filt,n) for n in '123456']
_minmax = lambda a,b: (a,b) if a<=b else (b,a)
plt.figure()
ax = plt.subplot(111)
for f in fields:
imf = os.path.join(os.environ['BASSRDXDIR'],'reduced','bokpipe_v0.2',
'nov15data',filt,f+'.fits')
hdus = fits.open(imf)
for hdu in hdus[1:]:
w = WCS(hdu.header)
ra0,dec0 = w.all_pix2world(1,1,1,ra_dec_order=True)
ra1,dec1 = w.all_pix2world(4096,4032,1,ra_dec_order=True)
ra0,ra1 = _minmax(ra0,ra1)
dec0,dec1 = _minmax(dec0,dec1)
rect = Rectangle((ra0,dec0),ra1-ra0,dec1-dec0,
alpha=0.5,color='0.2')
ax.add_patch(rect)
moscoo = ['10:00:28.00 02:12:20.99', '10:00:19.06 02:17:11.00',
'10:00:25.80 02:15:56.99', '10:00:21.25 02:13:35.00',
'10:00:23.52 02:14:45.99']
for c in moscoo:
ra,dec = radec_fromstr(c,True)
#plt.scatter(ra,dec,c='r',s=100,lw=2,marker='x')
rect = Rectangle((ra-0.3,dec-0.3),0.6,0.6,
color='r',fill=False)
ax.add_patch(rect)
d2f3coo = ['23:25:00 -00:05:00', '23:35:00 -00:05:00',
'23:35:00 +00:27:00', '23:25:00 +00:27:00',
'23:25:00 -00:05:00']
d2f3 = np.array([radec_fromstr(c,True) for c in d2f3coo])
print d2f3.shape
plt.plot(d2f3[:,0],d2f3[:,1],c='r')
#plt.xlim(149.49,151.3)
#plt.ylim(1.05,3.05)
plt.xlim(351,353.5)
plt.ylim(-1.4,1.0)
def get_colors():
apNum = 2
pfx = 's82cal%s_ra334'
mag = {}
for b in 'gr':
tab = Table.read(pfx%b+'_merged.fits')
zps = calc_zeropoints(tab,b,apNum=apNum)
zp = get_zeropoints(tab,zps,'amp')
aperMag = flux2mag(tab,b,'APER',zp=zp)[:,:,apNum]
meanMag = sigma_clip(aperMag,axis=1).mean(axis=1)
refMag = tab['tMag']
ii = np.where(( tab['tMag']>16.5) & (tab['tMag']<21.5) &
~meanMag.mask )[0]
mag[b] = {'bok_'+b:meanMag[ii].filled(0),'ref_'+b:refMag[ii],
'idx':tab['tIndex'][ii]}
tab = join(Table(mag['g']),Table(mag['r']),keys='idx')
tab['bok_gr'] = tab['bok_g'] - tab['bok_r']
tab['ref_gr'] = tab['ref_g'] - tab['ref_r']
return tab
def color_terms(tab=None,nIter=3):
if not tab:
tab = get_colors()
plt.figure(figsize=(9,4))
plt.subplots_adjust(0.08,0.12,0.95,0.95,0.3,0.3)
xedges = np.arange(-0.5,2.01,0.05)
yedges = np.arange(-0.5,0.51,0.02)
xbins = xedges[:-1] + np.diff(xedges)/2
ybins = yedges[:-1] + np.diff(yedges)/2
cfits = []
for pnum,b in enumerate('gr',start=1):
dmag = tab['bok_'+b] - tab['ref_'+b]
dmag = sigma_clip(dmag,sigma=5.0)
ii = np.where(tab['ref_gr']>1.3)[0]
dmag[ii] = np.ma.masked
print len(dmag),dmag.mask.sum()
for iterNum in range(nIter):
fit = np.ma.polyfit(tab['ref_gr'],dmag,1)
resid = dmag - np.polyval(fit,tab['ref_gr'])
dmag[np.abs(resid)>3.5*resid.std()] = np.ma.masked
print iterNum,fit,dmag.mask.sum()
print
cfits.append(fit)
xx = np.array([-0.5,2.0])
n,_,_ = np.histogram2d(tab['ref_gr'],dmag,[xedges,yedges])
ax = plt.subplot(1,2,pnum)
plt.axhline(0,c='m',ls='--')
plt.scatter(tab['ref_gr'],dmag.data,
edgecolor='none',alpha=0.7,s=1,c='k')
plt.contour(xbins,ybins,n.transpose(),colors='b')
plt.plot(xx,np.polyval(fit,xx),c='r',lw=2)
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.05))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.02))
plt.xlim(0.1,1.7)
plt.ylim(-0.35,0.25)
plt.ylabel('Bok %s - SDSS %s'%(b,b),size=11)
plt.xlabel('SDSS g-r',size=11)
plt.text(0.2,-0.3,
r'$%s(Bok) = %s(SDSS) %.2f\times(g-r) + %.2f$' %
((b,b)+tuple(fit)),
size=13,bbox=dict(facecolor='w',alpha=0.8))
np.savetxt('stripe82cterms.dat',cfits)
def etc_check():
from boketc import texp_onsky,k_ext,bok_zpt0
bokdir = os.path.join(os.environ['BASSRDXDIR'],'nov15data')
ebv = 0.04 # rough average in the field
exptime = 100. # all the same
# deep2g = Table.read('deep2g_ra352_merged.fits')
# deep2r = Table.read('deep2r_ra352_merged.fits')
# zps = {b:get_zeropoints(tab,calc_zeropoints(tab,b),'image').squeeze()
# for b,tab in [('g',deep2g),('r',deep2r)]}
zps = {'g':[25.909,25.914,25.922,25.913,25.921,25.926,25.934,
25.932,25.936,25.936,25.938],
'r':[25.753,25.761,25.756,25.750,25.707,25.732,25.739,
25.746,25.749,25.737,25.753]}
for filt in 'gr':
print '[%s] %10s %4s %6s %6s %5s %7s %10s' % \
(filt,'field ','secz','skyext','skymag','fwhm','t_est',
'depth_fac')
fields = ['deep2%s_ra352_%s' % (filt,n) for n in '123456']
for field,zp in zip(fields,zps[filt]):
imf = os.path.join(bokdir,filt,field+'.fits')
hdr0 = fits.getheader(imf,0)
hdr1 = fits.getheader(imf,1)
airmass = hdr0['airmass']
b = {'g':'g','r':'bokr'}[filt]
zp += -2.5*np.log10(1.375) - k_ext[b]*(airmass-1)
skyextinction = np.clip(bok_zpt0[b]-zp,0,np.inf)
cat = fits.getdata(os.path.join(bokdir,filt,field+'.cat.fits'))
ii = np.where((cat['MAG_AUTO']-cat['MAG_PSF']>-0.08) &
(cat['MAGERR_AUTO']<0.03) &
(cat['FLAGS']==0))[0]
fwhm = np.ma.median(sigma_clip(cat['FWHM_IMAGE'][ii]))[0]
fwhm *= 0.455
skyeps = hdr1['SKYVAL']/exptime
skymag = -2.5*np.log10(skyeps) + bok_zpt0[b]
t = texp_onsky(b,airmass,ebv,skyextinction,
skyeps=skyeps,fwhm=fwhm)
dfac = -2.5*0.5*np.log10(100/(t/3))
print '%s %4.2f %6.2f %6.2f %5.2f %7.1f %10.2f' % \
(field,airmass,skyextinction,skymag,fwhm,t/3,dfac)
print
def compare_scatter(phot1,phot2,names,minnobs=4,sigk='sigMag'):
m1,m2 = srcor(phot1['ra'],phot1['dec'],phot2['ra'],phot2['dec'],1.0)
plt.figure(figsize=(10,5))
plt.subplots_adjust(0.08,0.12,0.95,0.92,0.25)
for j,b in enumerate('gr'):
ii = np.where((phot1['nobs'][m1,j]>=minnobs) &
(phot2['nobs'][m2,j]>=minnobs))[0]
plt.subplot(1,2,j+1)
for p,c,l in zip([phot1[m1[ii]],phot2[m2[ii]]],'bg',names):
ii = np.where(p[sigk][:,j] > 0)[0]
plt.scatter(p['psfmag_'+b][ii],p[sigk][ii,j],s=7,c=c,
edgecolor='none',alpha=0.7,label=l)
plt.title('%s band' % b)
plt.legend(loc='upper left')
plt.xlim(15.7,{'g':22.5,'r':22.1}[b])
plt.ylim(-0.005,0.35)
plt.xlabel('SDSS %s mag' % b)
plt.ylabel(r'$\sigma(%s)$' % b)
| bsd-3-clause |
sonnyhu/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 58 | 1803 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, .2]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
3manuek/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 37 | 7136 | """Testing for the VotingClassifier module (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'rf__n_estimators': [20, 200]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
expect = [0.953, 0.960, 0.960, 0.953]
scores = [mean_score for params, mean_score, scores in grid.grid_scores_]
for e, s in zip(expect, scores):
assert_almost_equal(e, s, decimal=3)
| bsd-3-clause |
lukas/scikit-class | examples/keras-puzzle/weights.py | 2 | 2754 | from tensorflow.python import pywrap_tensorflow
import h5py
import numpy as np
from keras.datasets import mnist
from keras.models import load_model
from keras.utils import np_utils
from keras.losses import categorical_crossentropy
from sklearn.metrics import log_loss
def log_softmax(w):
assert len(w.shape) == 1
max_weight = np.max(w, axis=0)
rightHandSize = np.log(np.sum(np.exp(w - max_weight), axis=0))
return w - (max_weight + rightHandSize)
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def load_weights_from_tensorflow(filename):
reader = pywrap_tensorflow.NewCheckpointReader(filename)
weights = reader.get_tensor('Variable')
return weights
def load_biases_from_tensorflow(filename):
reader = pywrap_tensorflow.NewCheckpointReader(filename)
bias = reader.get_tensor('Variable_1')
return bias
def load_weights_from_keras_perceptron(filename):
f = h5py.File(filename)
bias = f['model_weights']['dense_1']['dense_1']['bias:0'][()]
weights = f['model_weights']['dense_1']['dense_1']['kernel:0'][()]
return weights, bias
def load_weights_from_keras_two_layer(filename):
f = h5py.File(filename)
bias1 = (f['model_weights']['dense_2']['dense_2']["bias:0"][()])
weights1 = (f['model_weights']['dense_2']['dense_2']["kernel:0"][()])
bias0 = (f['model_weights']['dense_1']['dense_1']["bias:0"][()])
weights0 = (f['model_weights']['dense_1']['dense_1']["kernel:0"][()])
return weights0, bias0, weights1, bias1
weight0, bias0 = load_weights_from_keras_perceptron('perceptron.h5')
#weights0, bias0, weights1, bias1 = load_weights_from_keras_two_layer('two-layer.h5')
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_width=28
img_height=28
X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')
X_test /= 255.
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
first_digit = X_test[:1]
model = load_model("perceptron.h5")
# predicting with our model
predictions = model.predict(first_digit)
first_digit_ground_truth = y_test[:1]
loss = log_loss(first_digit_ground_truth[0], predictions[0])
# Can you calculate the predictions from weight0, bias0 and first_digit?
# Can you calculate the derivative of a weight with respect to the loss function?
# try calculating predictions and derivatives with the two-layer mlp
# try calculating predictions and derivatives with a convolutional neural net
# You can write your own convolution function or use scipy's
# For some crazy reason, have to invert the kernel array
# return scipy.ndimage.convolve(matrix, kernel[::-1, ::-1], mode='constant' )
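# A minimal sketch of the first exercise above, assuming perceptron.h5 is a
# single dense softmax layer acting on the flattened 28x28 image, so that
# weight0 has shape (784, 10) and bias0 has shape (10,). That architecture is
# an assumption for illustration, not something this script verifies.
def manual_perceptron_predict(x, weights, bias):
    flat = x.reshape(x.shape[0], -1)       # (n, 28, 28) -> (n, 784)
    logits = np.dot(flat, weights) + bias  # affine layer
    return np.array([softmax(row) for row in logits])
# If the assumption holds, manual_perceptron_predict(first_digit, weight0,
# bias0) should closely match `predictions` from model.predict above.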
| gpl-2.0 |
sarvex/tensorflow | tensorflow/lite/micro/kernels/vexriscv/utils/log_parser.py | 15 | 8798 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Analyze function call stack from GDB or Renode
See README for detailed usage
Example usage:
python log_parser.py profile.txt --regex=gdb_regex.json --visualize --top=7
* To add a title to the graph, use the optional argument --title to set it
Example usage:
python log_parser.py profile.txt --regex=gdb_regex.json \
--visualize --top=7 --title=magic_wand
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import os
import re
import matplotlib.pyplot as plt
def readlines(filename):
"""
Arg:
filename(str):
Return:
(list of str):
"""
with open(filename, "r") as f:
content = f.read().splitlines()
return content
def writelines(data, filename):
# Write parsed log to file
with open(filename, "w") as f:
for line in data:
f.write(line + "\n")
def load_regex_parser(filename):
"""
Arg:
filename: string for the input json file containing regex
"""
assert filename is not None
with open(filename, "r") as f:
content = json.load(f)
regex_parser = {}
for key, val in content.items():
if isinstance(val, list):
regexs = []
for pattern in val:
regexs.append(re.compile(pattern))
regex_parser[key] = regexs
else:
regex_parser[key] = re.compile(val)
return regex_parser
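# A hypothetical regex file for illustration only (the real gdb_regex.json is
# not reproduced here): "base" strips ANSI color escapes, while each "custom"
# pattern captures the function name from a GDB frame line such as
# "#0  0x40002998 in __addsf3 (...) at ...".
#
# {
#     "base": "\\x1b\\[[0-9;]*m",
#     "custom": ["#\\d+\\s+(?:0x[0-9a-f]+ in )?(\\w+)"]
# }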
def gdb_log_parser(data, output, re_file, ignore_list=None, full_trace=False):
"""
Args:
data: list of strings of logs from GDB
output: string of output filename
re_file: path to the regex *.json file
ignore_list: list of string (functions) to ignore
full_trace: bool to generate full stack trace of the log
"""
regex_parser = load_regex_parser(re_file)
trace = collections.defaultdict(list)
stack = []
processed = []
for line in data:
# Skip invalid lines
if not line.startswith("#"):
continue
# Skip redundant lines
if not full_trace and not line.startswith("#0"):
continue
# Remove ANSI color symbols
# line = ANSI_CLEANER.sub("", line)
line = regex_parser["base"].sub("", line)
# Extract function names with regex
find = None
for r in regex_parser["custom"]:
find = r.findall(line)
if len(find) != 0:
break
if find is None or len(find) == 0:
continue
# Extract content from `re.findall` results
target = find[0][0] if isinstance(find[0], tuple) else find[0]
# Extract function name from `$ADDR in $NAME`, e.g.
# `0x40002998 in __addsf3` -> `__addsf3`
if " in " in target:
target = target.split()[-1]
# Remove leading/trailing spaces
target = target.strip()
if full_trace:
if line.startswith("#0") and stack:
# Encode the trace to string
temp = "/".join(stack)
trace[stack[0]].append(temp)
# Clear up previous stack
stack.clear()
stack.append(target)
if not line.startswith("#0"):
continue
if ignore_list and target in ignore_list:
continue
# Strip the string before adding into parsed list
processed.append(target)
print("Extracted {} lines".format(len(processed)))
# Write parsed log to file
writelines(processed, output)
if full_trace:
content = {}
for top, paths in trace.items():
content[top] = []
counter = collections.Counter(paths)
for path, counts in counter.items():
info = {"counts": counts, "path": path.split("/")}
content[top].append(info)
name = os.path.splitext(output)[0]
with open(name + ".json", "w") as f:
json.dump(content, f, sort_keys=True, indent=4)
print("Parsed the log to `{}`".format(output))
def renode_log_parser(data, output, ignore_list=None):
"""
Args:
data: list of strings of logs from Renode
output: string of output filename
ignore_list: list of string (functions) to ignore
"""
message = "Entering function"
extractor = re.compile(r"{} (.*) at".format(message))
ignore_count = 0
processed = []
for idx, line in enumerate(data):
print("Processing {:.2f}%".format((idx + 1) / len(data) * 100.), end="\r")
if message not in line:
continue
find = extractor.findall(line)
# Skip invalid find or unnamed functions
if len(find) == 0 or len(find[0].split()) == 0:
continue
entry = find[0].split()[0]
if ignore_list and entry in ignore_list:
ignore_count += 1
continue
processed.append(entry)
print("Extracted {} lines ({:.2f}%); {} lines are ignored ({:.2f}%)".format(
len(processed),
len(processed) / len(data) * 100., ignore_count,
ignore_count / len(data) * 100.))
# Write parsed log to file
writelines(processed, output)
print("Parsed the log to `{}`".format(output))
def parse_log(filename,
output=None,
re_file=None,
source="gdb",
ignore=None,
full_trace=False):
"""
Args:
filename(str)
output(str)
"""
data = readlines(filename)
print("Raw log: {} lines".format(len(data)))
ignore_list = None
if ignore is not None:
ignore_list = set(readlines(ignore))
print("* {} patterns in the ignore list".format(len(ignore_list)))
name, ext = None, None
if output is None:
name, ext = os.path.splitext(filename)
output = "{}-parsed{}".format(name, ext)
if source == "gdb":
gdb_log_parser(data, output, re_file, ignore_list, full_trace)
elif source == "renode":
renode_log_parser(data, output, ignore_list=ignore_list)
else:
raise NotImplementedError
def visualize_log(filename, top=None, title=None, show=False, save=True):
"""
Arg:
filename(str)
"""
data = readlines(filename)
print("Parsed log: {} lines".format(len(data)))
x, y = get_frequency(data)
if top is not None:
top *= -1
x, y = x[top:], y[top:]
plt.figure(figsize=(3, 5))
plt.barh(x, y)
plt.xlabel("Frequency")
if title:
plt.title(title)
if show:
plt.show()
if save:
fig_name = "{}.png".format(os.path.splitext(filename)[0])
plt.savefig(fname=fig_name, bbox_inches="tight", dpi=300)
print("Figure saved in {}".format(fig_name))
def get_frequency(data):
"""
Arg:
data(list of str):
Return:
keys(list of str):
vals(list of str):
"""
counter = collections.Counter(data)
keys = [pair[0] for pair in sorted(counter.items(), key=lambda x: x[1])]
vals = sorted(counter.values())
return keys, vals
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="Input raw log file.")
parser.add_argument("--output",
type=str,
help="Parsed log file. Default: [NAME]-parsed.[EXT]")
parser.add_argument("--regex",
type=str,
help="Path to the regex files for parsing GDB log.")
parser.add_argument("--visualize",
action="store_true",
help="Parse and visualize")
parser.add_argument("--top", type=int, help="Top # to visualize")
parser.add_argument("--source",
type=str,
default="gdb",
choices=["gdb", "renode"],
help="Source of where the log is captured")
parser.add_argument(
"--ignore",
type=str,
help="List of functions (one for each line in the file) to \
ignore after parsing.")
parser.add_argument("--full-trace", action="store_true", help="")
parser.add_argument("--title",
type=str,
help="Set title for the visualized image")
args = parser.parse_args()
if args.output is None:
fname, extension = os.path.splitext(args.input)
args.output = "{}-parsed{}".format(fname, extension)
parse_log(args.input, args.output, args.regex, args.source, args.ignore,
args.full_trace)
if args.visualize:
visualize_log(args.output, top=args.top, title=args.title)
| apache-2.0 |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
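# A small illustration (not part of the original shim) of the get_params /
# set_params contract implemented by _BaseEstimator above.
def _example_params_roundtrip():
  """Tiny usage sketch for the naive BaseEstimator clone (illustration only)."""
  class _Toy(_BaseEstimator):
    def __init__(self, alpha=1.0, beta=0.5):
      self.alpha = alpha
      self.beta = beta
  toy = _Toy(alpha=2.0)
  assert toy.get_params() == {'alpha': 2.0, 'beta': 0.5}
  # simple (non-nested) parameters are set in place and self is returned
  toy.set_params(beta=0.1)
  return toy.get_params()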
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
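# A tiny usage sketch (not original code) for the naive splitter above.
def _example_train_test_split():
  X = np.arange(20).reshape(10, 2)
  y = np.arange(10)
  # test_size=0.3 implies train_size=0.7, i.e. 7 training rows and 3 test rows
  X_train, X_test, y_train, y_test = _train_test_split(
      X, y, test_size=0.3, random_state=0)
  return X_train.shape, X_test.shape, y_train.shape, y_test.shape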
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/calibration.py | 37 | 20332 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from sklearn.preprocessing import LabelEncoder
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import label_binarize, LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted, check_consistent_length
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to
overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y`` is
neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_ : list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each crossvalidation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
le = LabelBinarizer().fit(y)
self.classes_ = le.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in
self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method,
classes=self.classes_)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
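# A minimal usage sketch for the class above with cv="prefit": the base
# estimator is trained on one half of the data and calibrated on the other,
# disjoint half.  The dataset generator and the GaussianNB base estimator are
# arbitrary choices for illustration, not part of this module's API.
def _demo_prefit_calibration():
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.naive_bayes import GaussianNB
    X, y = make_classification(n_samples=1000, n_features=20, random_state=0)
    X_train, X_calib, y_train, y_calib = train_test_split(
        X, y, test_size=0.5, random_state=0)
    clf = GaussianNB().fit(X_train, y_train)
    calibrated = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
    calibrated.fit(X_calib, y_calib)
    # Calibrated probabilities for a few samples; predict() picks the argmax.
    return calibrated.predict_proba(X_calib[:5])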
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
classes : array-like, shape (n_classes,), optional
Contains unique classes used to fit the base estimator.
        If None, then the classes are extracted from the given target values
in fit().
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid', classes=None):
self.base_estimator = base_estimator
self.method = method
self.classes = classes
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = self.label_encoder_.\
transform(self.base_estimator.classes_)
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
self.label_encoder_ = LabelEncoder()
if self.classes is None:
self.label_encoder_.fit(y)
else:
self.label_encoder_.fit(self.classes)
self.classes_ = self.label_encoder_.classes_
Y = label_binarize(y, self.classes_)
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
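# A short, self-contained sketch of the helper above on synthetic scores: the
# synthetic decision values (two well separated, noisy classes) are an
# arbitrary assumption made only to show the fitted slope/intercept in action.
def _demo_sigmoid_calibration():
    rng = np.random.RandomState(0)
    y = rng.randint(0, 2, size=200)
    df = y * 2.0 - 1.0 + rng.normal(scale=0.5, size=200)
    a, b = _sigmoid_calibration(df, y)
    # This module maps scores with P(y=1 | df) = 1 / (1 + exp(a * df + b)),
    # so with positives scoring higher the fitted slope `a` should be negative.
    return a, b, 1. / (1. + np.exp(a * df + b))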
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
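# A tiny worked example for the function above: with n_bins=2 the first bin
# collects the scores below 0.5 and the second the rest; the values chosen
# here are arbitrary and serve only to show the per-bin averages.
def _demo_calibration_curve():
    y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1])
    y_prob = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9])
    prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
    # prob_true: fraction of positives per bin; prob_pred: mean predicted
    # probability per bin.  Perfect calibration gives prob_true == prob_pred.
    return prob_true, prob_pred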
| mit |
akrherz/dep | scripts/tillage_timing/dump_dataset1.py | 2 | 3429 | """Dumper for the Dataset 1 request.
MLRA
plant date
sm @ planting
annual detach for year
annual precip for year
Year
Period ID
5 day period middle date
5 day precip total
5 day sm
5 day detach
- This would be one file per planting date (scenarios 59 through 69)
- 2008 thru 2018
"""
import datetime
import glob
from pyiem.dep import read_wb, read_env
from pyiem.util import logger
import pandas as pd
LOG = logger()
def do_scenario(scenario, plantdate, hucdf):
"""Process this scenario."""
index = pd.MultiIndex.from_product(
[range(2008, 2019), range(1, 74)], names=["year", "period"]
)
df = pd.DataFrame(index=index).reset_index()
def f(row):
"""Make date."""
return datetime.date(row["year"], 1, 1) + datetime.timedelta(
days=int(row["period"] - 1) * 5 + 2
)
df["5day_middle_date"] = df.apply(f, axis=1)
df = df.set_index(["year", "period"])
smdfs = []
flowpaths = 0
for _, row in hucdf.iterrows():
huc12 = row["HUC12"]
for fn in glob.glob(
"/i/%s/wb/%s/%s/*" % (scenario, huc12[:8], huc12[8:])
):
smdfs.append(read_wb(fn))
flowpaths += 1
smdf = pd.concat(smdfs)
del smdfs
envdfs = []
for _, row in hucdf.iterrows():
huc12 = row["HUC12"]
for fn in glob.glob(
"/i/%s/env/%s/%s/*" % (scenario, huc12[:8], huc12[8:])
):
envdfs.append(read_env(fn))
envdf = pd.concat(envdfs)
envdf["jday"] = pd.to_numeric(
envdf["date"].dt.strftime("%j"), downcast="integer"
)
del envdfs
# only one ofe 1
smdf = smdf[smdf["ofe"] == 1]
smdf["period"] = (smdf["jday"] + 5) // 5
envdf["period"] = (envdf["jday"] + 5) // 5
# only consider 2008 thru 2018 data
smdf = smdf[(smdf["year"] > 2007) & (smdf["year"] < 2019)]
envdf = envdf[(envdf["year"] > 2007) & (envdf["year"] < 2019)]
gdf = envdf.groupby(["year", "period"]).mean()
df["5day_precip_mm"] = gdf["precip"]
df["5day_detach_kgm2"] = gdf["av_det"]
gdf = smdf.groupby(["year", "period"]).mean()
df["5day_soilmoist"] = gdf["sw1"]
gdf = envdf.groupby("year").sum() / flowpaths
df = df.join(gdf[["precip", "av_det"]])
df = df.rename(
{"precip": "annual_precip_mm", "av_det": "annual_detach_kgm2"}, axis=1
)
gdf = (
smdf[smdf["jday"] == int(plantdate.strftime("%j"))]
.groupby("year")
.mean()
)
df = df.join(gdf["sw1"])
df = df.rename({"sw1": "plant_soilmoist"}, axis=1)
df["plant_date"] = plantdate.strftime("%m %d")
df["mlra_id"] = hucdf.iloc[0]["MLRA"]
df = df.fillna(0)
LOG.info("done with %s %s", plantdate, hucdf.iloc[0]["MLRA"])
return df
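# A small sketch of the 5-day "period" index used in do_scenario() above, on
# plain integers so no DEP output files are needed: day-of-year values are
# binned with (jday + 5) // 5.
def _demo_period_index():
    # -> [1, 1, 2, 73, 74]; a period of 74 (day 365) falls outside the
    # 73-period index built in do_scenario() and is dropped when joining.
    return [(jday + 5) // 5 for jday in (1, 4, 5, 364, 365)]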
def main():
"""Go Main Go."""
apr10 = datetime.date(2000, 4, 10)
mlradf = pd.read_csv(
"myhucs_mlra.txt",
sep=r"\s?\|\s?",
dtype={"HUC12": str},
skipinitialspace=True,
engine="python",
)
for scenario in range(59, 70):
plantdate = apr10 + datetime.timedelta(days=(scenario - 59) * 5)
dfs = []
for _, hucdf in mlradf.groupby("MLRA"):
dfs.append(do_scenario(scenario, plantdate, hucdf))
df = pd.concat(dfs)
df.to_csv(
"dataset1_%s.csv" % (plantdate.strftime("%b%d"),),
float_format="%.4f",
)
if __name__ == "__main__":
main()
| mit |
juliusf/Genetic-SRCPSP | tools/stat_inference/mixture_dist.py | 1 | 1592 |
__author__ = 'jules'
#see http://nbviewer.ipython.org/github/timstaley/ipython-notebooks/blob/compiled/probabilistic_programming/convolving_distributions_illustration.ipynb
import deepThought.ORM.ORM as ORM
from deepThought.util import list_to_cdf
from deepThought.stats.phase_type import infer_distribution
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from deepThought.stats.customDistributions import MixtureDist2
def main():
job = ORM.deserialize("/tmp/output.pickle")
results = sorted(job.tasks.values(), key=lambda x: len(x.execution_history), reverse=True)
jobs = []
for x in results:
mean = np.mean(x.execution_history)
cx = np.sqrt(np.var(x.execution_history)) / mean
if cx >= 1:
jobs.append(x)
#set1 = jobs[1].execution_history
set1 = results[1].execution_history
data = np.array(set1)
mean = np.mean(data)
var = np.var(data)
dist = infer_distribution(data)
    errscale = 10  # not used further in this script
    err = stats.norm(loc=mean, scale=var)
    delta = 1  # not used further in this script
    sum_rv_delta_size = 1e2  # 1e-2; not used further in this script
mixt = MixtureDist2(0.1, err, dist)
data_x, data_y = list_to_cdf(set1)
    new_grid = np.arange(0, 90000, 100)
    plt.plot(new_grid, dist.cdf(new_grid), label='inferred phase-type')
    plt.plot(new_grid, err.cdf(new_grid), label='gaussian')
    plt.plot(new_grid, mixt.cdf(new_grid), label='mixture')
plt.plot(data_x, data_y, label="data")
#plt.xlim(0,max(new_grid))
plt.legend(loc='best'), plt.suptitle('CDFs')
#plt.ylim(-0.1,1.1)
plt.show()
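# A generic two-component mixture CDF, F(x) = w * F1(x) + (1 - w) * F2(x),
# written in plain scipy.  This only mirrors the role MixtureDist2 plays in
# main() above; the actual deepThought implementation may differ.
def _demo_mixture_cdf(w=0.1):
    grid = np.arange(0, 10, 0.1)
    f1 = stats.norm(loc=2.0, scale=1.0)
    f2 = stats.expon(scale=3.0)
    return w * f1.cdf(grid) + (1.0 - w) * f2.cdf(grid)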
if __name__ == '__main__':
main() | mit |
sumspr/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
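# A minimal sketch of the function above on a small dense array; the numbers
# are arbitrary and only illustrate the column-wise standardization.
def _demo_scale():
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    # Each column of X_scaled now has (approximately) zero mean and unit variance.
    return X_scaled.mean(axis=0), X_scaled.std(axis=0)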
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
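# A minimal sketch of the class above: fit_transform applies the documented
# X_std = (X - X.min) / (X.max - X.min) mapping per column, and
# inverse_transform undoes it.  The data is an arbitrary toy example.
def _demo_minmax_scaler():
    X = np.array([[-1., 2.],
                  [-0.5, 6.],
                  [0., 10.],
                  [1., 18.]])
    scaler = MinMaxScaler(feature_range=(0, 1))
    X_scaled = scaler.fit_transform(X)
    return X_scaled, scaler.inverse_transform(X_scaled)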
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
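# A minimal sketch of the class above showing the fit/transform split: the
# mean and standard deviation are learned on training data and then re-used
# on new samples.  The toy values are arbitrary.
def _demo_standard_scaler():
    X_train = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    scaler = StandardScaler().fit(X_train)
    # Here mean_ == [0.5, 0.5] and std_ == [0.5, 0.5], so [2., 2.] maps to [3., 3.].
    return scaler.transform(np.array([[2., 2.]]))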
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
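# A minimal sketch of the class above on a column with one large outlier: the
# median and interquartile range used for centering/scaling are barely moved
# by the outlier.  The numbers are arbitrary.
def _demo_robust_scaler():
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    scaler = RobustScaler().fit(X)
    # center_ is the median (3.0) and scale_ the IQR (q75 - q25 == 2.0).
    return scaler.center_, scaler.scale_, scaler.transform(X).ravel()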
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
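# A minimal sketch of the function above comparing the three supported norms
# on one small matrix; the values are arbitrary.
def _demo_normalize():
    X = np.array([[4., 1., 2., 2.],
                  [1., 3., 9., 3.],
                  [5., 7., 5., 1.]])
    # 'l1' rows sum to 1, 'l2' rows get unit Euclidean length, and 'max'
    # divides each row by its largest entry.
    return {norm: normalize(X, norm=norm) for norm in ('l1', 'l2', 'max')}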
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
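# A minimal sketch of the equivalence stated in the docstring above: centering
# a linear kernel with KernelCenterer matches computing the kernel on
# explicitly mean-centered data.  The random data is arbitrary.
def _demo_kernel_centerer():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    return np.allclose(K_centered, np.dot(X_centered, X_centered.T))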
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
    categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                   % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those whose values are less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
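# --- Illustrative sketch (editorial addition, not part of scikit-learn) ---
# With handle_unknown='ignore', a category unseen during fit simply produces
# no nonzero entry instead of raising, as implemented in _transform above.
def _demo_one_hot_unknown():
    enc = OneHotEncoder(handle_unknown='ignore')
    enc.fit([[0, 0], [1, 1]])
    # Category 2 of the second feature was never seen during fit, so only the
    # first feature contributes a one: [[1., 0., 0., 0.]]
    return enc.transform([[0, 2]]).toarray()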
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/stats/tests/test_math.py | 1 | 1921 | import nose
from datetime import datetime
from numpy.random import randn
import numpy as np
from pandas.core.api import Series, DataFrame, date_range
from pandas.util.testing import assert_almost_equal
import pandas.core.datetools as datetools
import pandas.stats.moments as mom
import pandas.util.testing as tm
import pandas.stats.math as pmath
import pandas.tests.test_series as ts
from pandas import ols
N, K = 100, 10
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
class TestMath(tm.TestCase):
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def setUp(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = date_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
def test_rank_1d(self):
self.assertEqual(1, pmath.rank(self.series))
self.assertEqual(0, pmath.rank(Series(0, self.series.index)))
def test_solve_rect(self):
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
b = Series(np.random.randn(N), self.frame.index)
result = pmath.solve(self.frame, b)
expected = ols(y=b, x=self.frame, intercept=False).beta
self.assert_(np.allclose(result, expected))
def test_inv_illformed(self):
singular = DataFrame(np.array([[1, 1], [2, 2]]))
rs = pmath.inv(singular)
expected = np.array([[0.1, 0.2], [0.1, 0.2]])
self.assert_(np.allclose(rs, expected))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
ilo10/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
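# --- Illustrative sketch (editorial addition, not part of the test module) ---
# Direct use of DistanceMetric outside the test harness: build a metric,
# compute a pairwise matrix and cross-check it against scipy's cdist.
def _demo_minkowski_pairwise():
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 3))
    dm = DistanceMetric.get_metric('minkowski', p=3)
    D = dm.pairwise(X)
    assert_array_almost_equal(D, cdist(X, X, 'minkowski', p=3))
    return D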
| bsd-3-clause |
peterwilletts24/Python-Scripts | plot_scripts/Rain/plot_from_pp_avg5216_diff.py | 1 | 8633 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
save_path='/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
pp_file = 'rain_mean'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
min_contour = -0.8
max_contour = 0.8
tick_interval=0.2
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
# Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet
latmin=-6.79
latmax=29.721
lonmin=340.
lonmax=379.98
lat_constraint=iris.Constraint(grid_latitude= lambda la: latmin <= la.point <= latmax)
lon_constraint=iris.Constraint(grid_longitude= lambda lo: lonmin <= lo.point <= lonmax)
# Global LAM not rotated - so different coord constraints
lonmin_g=64.1153327
lonmax_g=101.865817
lon_constraint_g = iris.Constraint(grid_longitude= lambda lo: lonmin_g <= lo.point <= lonmax_g)
# Load global cube
gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/djzn/djznw/%s.pp' % pp_file
glob = iris.load_cube(gl, lat_constraint & lon_constraint_g)
#glob = iris.load_cube(gl)
cs_glob = glob.coord_system('CoordSystem')
# Unrotate global cube
lat_g = glob.coord('grid_latitude').points
lon_g = glob.coord('grid_longitude').points
#print lat_g
if isinstance(cs_glob, iris.coord_systems.RotatedGeogCS):
print ' Global Model - djznw - Unrotate pole %s' % cs_glob
lons_g, lats_g = np.meshgrid(lon_g, lat_g)
lons_g,lats_g = iris.analysis.cartography.unrotate_pole(lons_g,lats_g, cs_glob.grid_north_pole_longitude, cs_glob.grid_north_pole_latitude)
lon_g=lons_g[0]
lat_g=lats_g[:,0]
#print lats_g
for i, coord in enumerate (glob.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_glob = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_glob = i
csur_glob=cs_glob.ellipsoid
glob.remove_coord('grid_latitude')
glob.remove_coord('grid_longitude')
glob.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_glob)
glob.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_glob)
experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['djzny' ]
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
#pc = iris(pfile)
#pcube = iris.load_cube(pfile, lat_constraint & lon_constraint)
pcube = iris.load_cube(pfile)
#print pcube
#print pc
# Get min and max latitude/longitude and unrotate to get min/max corners to crop plot automatically - otherwise end with blank bits on the edges
# Unrotate cube
lat = pcube.coord('grid_latitude').points
lon = pcube.coord('grid_longitude').points
#print lat
#print 'lat'
#print lon
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - Unrotate pole %s' % (experiment_id,cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
for i, coord in enumerate (pcube.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord = i
if coord.standard_name=='grid_longitude':
lon_dim_coord = i
csur=cs.ellipsoid
pcube.remove_coord('grid_latitude')
pcube.remove_coord('grid_longitude')
pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
lon_min=np.min(lons_g)
lon_max=np.max(lons_g)
lon_low_tick=lon_min -(lon_min%divisor)
lon_high_tick=math.ceil(lon_max/divisor)*divisor
lat_min=np.min(lats_g)
lat_max=np.max(lats_g)
lat_low_tick=lat_min - (lat_min%divisor)
lat_high_tick=math.ceil(lat_max/divisor)*divisor
print lon_high_tick
print lon_low_tick
pcube_regrid_data = scipy.interpolate.griddata((lats.flatten(), lons.flatten()),pcube.data.flatten(), (lats_g, lons_g), method='linear')
#pcube_regrid = iris.analysis.interpolate.linear(pcube, sample_points)
#print pcube.data.flatten()
pcube_regrid = glob.copy(data=pcube_regrid_data)
pcubediff=pcube_regrid-glob
#print pcube.data[0,0]
#print pcube_regrid_data[0,0]
#print pcubediff.data
#print glob.data[0,0]
plt.figure(figsize=(8,8))
cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lonmin_g+2,lonmax_g-2,latmin+degs_crop_bottom,latmax-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,256)
cont = iplt.contourf(pcubediff*3600, clevs, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'black'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'black'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
cbar.set_label('mm/h')
#cbar.set_label(pcube.units, fontsize=10)
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['%.1f' % i for i in ticks])
#main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
plt.savefig('%s%s/%s/%s_%s_notitle_diff.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
#plt.savefig('%s%s/%s/%s_%s_diff.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
openstack/swift | swift/common/middleware/xprofile.py | 3 | 9918 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on the eventlet-aware profiler. (In the
future, more profilers could be added to collect additional data for analysis.)
It profiles all incoming requests and accumulates CPU timing statistics for
performance tuning and optimization. A mini web UI is also provided for
analysing the profiling data; it can be accessed from the URLs listed below.
Index page for browsing profile data::
http://SERVER_IP:PORT/__profile__
List all profiles and return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is defined as the concatenation of the file name, function name and the
first line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6200/__profile__/all (object server)
http://localhost:6201/__profile__/current (container server)
http://localhost:6202/__profile__/12345?format=json (account server)
The profiling middleware can be configured in the paste file for WSGI servers
such as the proxy, account, container and object servers. Please refer to the
sample configuration files in the etc directory.
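An illustrative paste deploy filter section, using the options read in
ProfileMiddleware.__init__ below (the egg entry point name is an assumption
here; see the sample configuration files for the real one)::
    [filter:xprofile]
    use = egg:swift#xprofile
    log_filename_prefix = /tmp/log/swift/profile/default.profile
    dump_interval = 5.0
    dump_timestamp = false
    path = __profile__
    flush_at_shutdown = false
    unwind = false
    profile_module = eventlet.green.profile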
The profiling data is provided in four formats: binary (the default), json,
csv and ods spreadsheet; the last one requires installing the odfpy library::
sudo pip install odfpy
There is also a simple visualization capability, enabled by the matplotlib
toolkit; matplotlib must be installed if you want to visualize the statistic
data::
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
import six
from six.moves import urllib
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from swift.common.middleware.x_profile.exceptions import MethodNotAllowed
from swift.common.middleware.x_profile.exceptions import NotFoundException
from swift.common.middleware.x_profile.exceptions import ProfileException
from swift.common.middleware.x_profile.html_viewer import HTMLViewer
from swift.common.middleware.x_profile.profile_model import ProfileLog
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'
# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
app_iter.close()
"""
# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""
if six.PY3:
thread = patcher.original('_thread') # non-monkeypatched module needed
else:
thread = patcher.original('thread') # non-monkeypatched module needed
# This monkey patch code fixes the problem of the eventlet profile tool,
# which cannot accumulate profiling results across multiple calls
# of runcall and runctx.
def new_setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def new_runctx(self, cmd, globals, locals):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def new_runcall(self, func, *args, **kw):
if not getattr(self, '_has_setup', False):
self._setup()
try:
return self.base.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
class ProfileMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.logger = get_logger(conf, log_route='profile')
self.log_filename_prefix = conf.get('log_filename_prefix',
DEFAULT_PROFILE_PREFIX)
dirname = os.path.dirname(self.log_filename_prefix)
        # Note: creating this directory may fail with a permission error.
        # It is better to create it and grant access to the current user
        # in advance.
if not os.path.exists(dirname):
os.makedirs(dirname)
self.dump_interval = float(conf.get('dump_interval', 5.0))
self.dump_timestamp = config_true_value(conf.get(
'dump_timestamp', 'no'))
self.flush_at_shutdown = config_true_value(conf.get(
'flush_at_shutdown', 'no'))
self.path = conf.get('path', '__profile__').replace('/', '')
self.unwind = config_true_value(conf.get('unwind', 'no'))
self.profile_module = conf.get('profile_module',
'eventlet.green.profile')
self.profiler = get_profiler(self.profile_module)
self.profile_log = ProfileLog(self.log_filename_prefix,
self.dump_timestamp)
self.viewer = HTMLViewer(self.path, self.profile_module,
self.profile_log)
self.dump_pool = GreenPool(1000)
self.last_dump_at = None
def __del__(self):
if self.flush_at_shutdown:
self.profile_log.clear(str(os.getpid()))
def _combine_body_qs(self, request):
wsgi_input = request.environ['wsgi.input']
query_dict = request.params
qs_in_body = wsgi_input.read().decode('utf-8')
query_dict.update(urllib.parse.parse_qs(qs_in_body,
keep_blank_values=True,
strict_parsing=False))
return query_dict
def dump_checkpoint(self):
current_time = time.time()
if self.last_dump_at is None or self.last_dump_at +\
self.dump_interval < current_time:
self.dump_pool.spawn_n(self.profile_log.dump_profile,
self.profiler, os.getpid())
self.last_dump_at = current_time
def __call__(self, environ, start_response):
request = Request(environ)
path_entry = request.path_info.split('/')
# hijack favicon request sent by browser so that it doesn't
# invoke profiling hook and contaminate the data.
if path_entry[1] == 'favicon.ico':
start_response('200 OK', [])
return ''
elif path_entry[1] == self.path:
try:
self.dump_checkpoint()
query_dict = self._combine_body_qs(request)
content, headers = self.viewer.render(request.url,
request.method,
path_entry,
query_dict,
self.renew_profile)
start_response('200 OK', headers)
if isinstance(content, six.text_type):
content = content.encode('utf-8')
return [content]
except MethodNotAllowed as mx:
start_response('405 Method Not Allowed', [])
return '%s' % mx
except NotFoundException as nx:
start_response('404 Not Found', [])
return '%s' % nx
except ProfileException as pf:
start_response('500 Internal Server Error', [])
return '%s' % pf
except Exception as ex:
start_response('500 Internal Server Error', [])
return _('Error on render profiling results: %s') % ex
else:
_locals = locals()
code = self.unwind and PROFILE_EXEC_EAGER or\
PROFILE_EXEC_LAZY
self.profiler.runctx(code, globals(), _locals)
app_iter = _locals['app_iter_']
self.dump_checkpoint()
return app_iter
def renew_profile(self):
self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
if profile_module == 'eventlet.green.profile':
eprofile.Profile._setup = new_setup
eprofile.Profile.runctx = new_runctx
eprofile.Profile.runcall = new_runcall
# hacked method to import profile module supported in python 2.6
__import__(profile_module)
return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def profile_filter(app):
return ProfileMiddleware(app, conf)
return profile_filter
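# --- Illustrative sketch (editorial addition, not part of swift) ---
# Minimal programmatic wiring of the middleware around a trivial WSGI app.
# Note that constructing ProfileMiddleware creates the profile log directory
# (by default under /tmp), so calling this has filesystem side effects.
def _demo_wrap_app():
    def dummy_app(environ, start_response):
        start_response('200 OK', [])
        return [b'']
    make_filter = filter_factory({}, dump_interval='10')
    return make_filter(dummy_app)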
| apache-2.0 |
GEMScienceTools/oq-subduction | openquake/sub/tests/misc/edge_test.py | 1 | 11769 | """
:mod:`openquake.sub.tests.misc.edge_test`
"""
import os
import glob
import numpy as np
import unittest
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from openquake.hazardlib.geo.geodetic import distance
from openquake.hazardlib.geo.mesh import Mesh
from openquake.sub.misc.edge import (_read_edge, _resample_edge,
create_from_profiles, create_faults,
_rotate_vector, line_between_two_points,
_get_mean_longitude)
from openquake.sub.misc.profile import _read_profile
BASE_DATA_PATH = os.path.dirname(__file__)
class CreateFaultTest(unittest.TestCase):
def setUp(self):
path = os.path.join(BASE_DATA_PATH, '..', 'data', 'misc', 'top_mesh')
x = np.loadtxt(os.path.join(path, 'top_mesh.x'))
y = np.loadtxt(os.path.join(path, 'top_mesh.y'))
z = np.loadtxt(os.path.join(path, 'top_mesh.z'))
self.mesh = np.stack((x, y, z), 2)
def test_create_virtual_fault(self):
"""
Create profiles for the virtual fault and check that all are defined
"""
thickness = 50.
angles = [30., 45., 90., 135]
sampling = 5
idx = 0
for angl in angles:
lines = create_faults(self.mesh, idx, thickness, angl, sampling)
for l in lines[0]:
pts = [[p.longitude, p.latitude, p.depth] for p in l.points]
pts = np.array(pts)
self.assertTrue(not np.any(np.isnan(pts)))
if False:
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
fig = plt.figure()
ax.plot(self.mesh[idx, :, 0], self.mesh[idx, :, 1],
self.mesh[idx, :, 2]*0.1, '-', lw=2)
for angl in angles:
lines = create_faults(self.mesh, 0, thickness, angl, sampling)
col = np.random.rand(3)
for l in lines[0]:
pts = [[p.longitude, p.latitude, p.depth] for
p in l.points]
pts = np.array(pts)
ax.plot(pts[:, 0], pts[:, 1], pts[:, 2]*0.1, '-',
color=col)
plt.show()
class MeanLongitudeTest(unittest.TestCase):
def test_values_across_idl(self):
computed = _get_mean_longitude(np.array([178, -179]))
expected = 179.5
np.testing.assert_equal(computed, expected)
def test_values_simple(self):
computed = _get_mean_longitude(np.array([178, 179]))
expected = 178.5
np.testing.assert_equal(computed, expected)
class Line3d2PointsTest(unittest.TestCase):
def test01(self):
pnt1 = np.array([1., 2., 3.])
pnt2 = np.array([4., 5., 6.])
expected = np.array([0.58, 0.58, 0.58])
computed = line_between_two_points(pnt1, pnt2)
np.testing.assert_allclose(computed, expected, rtol=1)
class RotateVectorTest(unittest.TestCase):
"""
The tests are performed against the results computed with
this tool:
http://www.nh.cas.cz/people/lazar/celler/online_tools.php
"""
def test01(self):
"""
        Rotate the x-axis by 45° around the y-axis
"""
v = np.array([1, 0, 0])
k = np.array([0, 1, 0])
angle = 45
computed = _rotate_vector(v, k, angle)
expected = np.array([0.707107, 0, -0.707107])
np.testing.assert_allclose(computed, expected, rtol=1)
def test02(self):
"""
More general case of rotation
"""
v = np.array([0.4, 0.6, 0.2])
k = np.array([0.5, 0.1, -0.4])
angle = 53.
computed = _rotate_vector(v, k, angle)
expected = np.array([0.646455, 0.057751, 0.372506])
np.testing.assert_allclose(computed, expected, rtol=1)
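# --- Illustrative cross-check (editorial addition, not part of the tests) ---
# Rodrigues' rotation formula written out with plain numpy; with the angle in
# degrees and a normalised axis it reproduces the reference values above,
# e.g. _rodrigues_reference([1, 0, 0], [0, 1, 0], 45) ~= [0.707, 0, -0.707].
def _rodrigues_reference(v, k, angle_deg):
    v = np.asarray(v, dtype=float)
    k = np.asarray(k, dtype=float)
    k = k / np.linalg.norm(k)
    theta = np.radians(angle_deg)
    return (v * np.cos(theta)
            + np.cross(k, v) * np.sin(theta)
            + k * np.dot(k, v) * (1 - np.cos(theta)))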
class CreateFromProfilesTest(unittest.TestCase):
# TODO:
# - check duplicated points in an edge
    # - manage the case of discontinuous edges
def setUp(self):
#
path = os.path.join(BASE_DATA_PATH, '../data/slab/cs02/*.csv')
self.profiles = []
for filename in sorted(glob.glob(path)):
self.profiles.append(_read_profile(filename))
#
path = os.path.join(BASE_DATA_PATH, '../data/slab/cs03/*.csv')
self.profiles1 = []
for filename in sorted(glob.glob(path)):
self.profiles1.append(_read_profile(filename))
#
path = os.path.join(BASE_DATA_PATH, '../data/slab/cs04/*.csv')
self.profiles2 = []
for filename in sorted(glob.glob(path)):
self.profiles2.append(_read_profile(filename))
#
path = os.path.join(BASE_DATA_PATH, '../data/profiles01/cs*.txt')
self.profiles3 = []
for filename in sorted(glob.glob(path)):
self.profiles3.append(_read_profile(filename))
def test_create0(self):
"""
Create edges from profiles 0
"""
# sampling: profile, edge
msh = create_from_profiles(self.profiles, 10, 5, False)
def test_create1(self):
"""
Create edges from profiles 1
"""
# sampling: profile, edge
msh = create_from_profiles(self.profiles1, 5, 5, False)
def test_create2(self):
"""
Create edges from profiles 2
"""
# sampling: profile, edge
msh = create_from_profiles(self.profiles2, 20, 25, False)
def test_create3(self):
"""
        Create edges from profiles 2 with a coarser sampling
"""
# sampling: profile, edge
msh = create_from_profiles(self.profiles2, 50, 50, False)
def _test_create4(self):
"""
Create edges from profiles 3
"""
msh = create_from_profiles(self.profiles3, 5, 5, False)
assert not np.any(np.isnan(msh))
class ResampleEdgeTest(unittest.TestCase):
def setUp(self):
filename = os.path.join(BASE_DATA_PATH,
'../data/slab/edge/edge_000.csv')
self.edge = _read_edge(filename)
def test_edge_resampling01(self):
"""
Test edge resampling with a resampling distance of 25 km
"""
#
# resampled profile
sampling_distance = 25.
out_line, _, _ = _resample_edge(self.edge, sampling_distance, 5)
#
# lists with coordinates for the resampled profile
lo = [pnt.longitude for pnt in out_line.points]
la = [pnt.latitude for pnt in out_line.points]
de = [pnt.depth for pnt in out_line.points]
#
        # lengths of resampled segments
dsts = []
for i in range(0, len(out_line)-1):
dsts.append(distance(lo[i], la[i], de[i],
lo[i+1], la[i+1], de[i+1]))
#
# testing
expected = np.ones((len(out_line)-1))*sampling_distance
np.testing.assert_allclose(dsts, expected, rtol=2, atol=0.)
def test_edge_resampling02(self):
"""
Test edge resampling with a resampling distance of 10 km
"""
#
# resampled profile
sampling_distance = 10.
out_line, _, _ = _resample_edge(self.edge, sampling_distance, 5)
#
# lists with coordinates for the resampled profile
lo = [pnt.longitude for pnt in out_line.points]
la = [pnt.latitude for pnt in out_line.points]
de = [pnt.depth for pnt in out_line.points]
#
        # lengths of resampled segments
dsts = []
for i in range(0, len(out_line)-1):
dsts.append(distance(lo[i], la[i], de[i],
lo[i+1], la[i+1], de[i+1]))
#
# testing
expected = np.ones((len(out_line)-1))*sampling_distance
np.testing.assert_allclose(dsts, expected, rtol=2, atol=0.)
class ReadEdgeTest(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(BASE_DATA_PATH,
'../data/slab/edge/edge_000.csv')
def test_read_profile(self):
"""
Test reading a edge file
"""
computed = _read_edge(self.filename)
lons = [pnt.longitude for pnt in computed.points]
lats = [pnt.latitude for pnt in computed.points]
deps = [pnt.depth for pnt in computed.points]
lons_expected = [-8.294831883250239457e+01, -8.347113383616317606e+01,
-8.443028702759889370e+01, -8.505794151852860807e+01,
-8.584561547512082313e+01, -8.631551275344533281e+01,
-8.683238047673029314e+01, -8.776764521710948941e+01,
-8.890904386827106975e+01, -8.970302148270327791e+01,
-9.007321601251436505e+01, -9.098563317709692910e+01,
-9.202878921049629923e+01, -9.286755595092729720e+01,
-9.377193007159837634e+01, -9.467064876474159973e+01,
-9.573164826059495169e+01, -9.658845523814640899e+01,
-9.852944168622553889e+01, -1.002200364234107468e+02,
-1.010518388869808177e+02, -1.017966307049553194e+02,
-1.027087419628715566e+02, -1.034520970862245122e+02,
-1.043126646046702177e+02, -1.049145053002839632e+02,
-1.057032567476713325e+02]
lats_expected = [7.655890711151086769e+00, 8.592405740147635029e+00,
8.926827693580914769e+00, 9.379254904438523610e+00,
9.800896181004983276e+00, 1.052077644719489413e+01,
1.126126700604738140e+01, 1.185098362267974181e+01,
1.216955938028376316e+01, 1.257674880493079073e+01,
1.288726010003954414e+01, 1.300458168978518714e+01,
1.364439121600205773e+01, 1.398627418090333485e+01,
1.434332714654129859e+01, 1.488407045045097910e+01,
1.540204147420979730e+01, 1.576928904676865528e+01,
1.607833500594980691e+01, 1.668378236227314559e+01,
1.707899734826530036e+01, 1.744602440690043821e+01,
1.791135119785566232e+01, 1.816301943627114923e+01,
1.846663314884608553e+01, 1.893173126671553774e+01,
1.966107823770858332e+01]
deps_expected = [1.181428571428580199e+01, 1.288571428571435717e+01,
7.885714285714357175e+00, 5.385714285714357175e+00,
1.002857142857152439e+01, 1.288571428571435717e+01,
1.574285714285718996e+01, 2.038571428571435717e+01,
1.074285714285718996e+01, 8.600000000000079581e+00,
1.431428571428580199e+01, 1.217142857142863477e+01,
1.145714285714291236e+01, 7.528571428571524393e+00,
1.145714285714291236e+01, 7.528571428571524393e+00,
4.671428571428634768e+00, 1.752857142857152439e+01,
5.028571428571524393e+00, 6.457142857142912362e+00,
6.100000000000079581e+00, 7.528571428571524393e+00,
7.885714285714357175e+00, 6.457142857142912362e+00,
6.814285714285801987e+00, 8.957142857142912362e+00,
7.528571428571524393e+00]
np.testing.assert_almost_equal(lons, lons_expected)
np.testing.assert_almost_equal(lats, lats_expected)
np.testing.assert_almost_equal(deps, deps_expected)
| agpl-3.0 |
leobrowning92/arduino-lineCCD-spectrometer | plotter.py | 1 | 1293 | import serial
import time
import matplotlib.pyplot as plt
plt.interactive(True)
print 'import'
# open up dummy serial to reset the arduino with
s = serial.Serial(port='/dev/ttyUSB1')
# reset the arduino
s.flushInput()
s.setDTR(level=False)
time.sleep(0.5)
# ensure there is no stale data in the buffer
s.flushInput()
s.setDTR()
time.sleep(1)
# now open up a new serial line for communication
s = serial.Serial(baudrate=115200, port='/dev/ttyUSB1', timeout=0.01)
#initializes plotting axis
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# initializes data
data=[]
# time for system to settle after opening serial port
time.sleep(1)
# initial read command
s.write('r')
#continuous loop that will plot the data anew each time it runs, as well as
#pass the read command to the arduino
while True:
s.write('r') #read command
#loop which iterates through the serial being read, only taking
#non-empty values and appending them to the data set
while True:
value=s.readline()
if value !='':
data.append(float(value.rstrip()))
#determines the length of the dataset to observe
if len(data)==800:
break
#plots the dataset
ax1.clear()
ax1.plot( range(len(data)), data )
plt.draw()
data=[]
| gpl-3.0 |
cbmoore/statsmodels | statsmodels/graphics/tests/test_gofplots.py | 27 | 6814 | import numpy as np
from numpy.testing import dec
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot, qqline, ProbPlot
from scipy import stats
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
class BaseProbplotMixin(object):
def base_setup(self):
if have_matplotlib:
self.fig, self.ax = plt.subplots()
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_ppplot(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_probplot(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_array(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_array(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_array(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_prbplt(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_prbplt(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_prbplt(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_qqplot_custom_labels(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_ppplot_custom_labels(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_probplot_custom_labels(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_qqplot_pltkwargs(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_ppplot_pltkwargs(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_probplot_pltkwargs(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
class TestProbPlotLongely(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.line = 'r'
self.base_setup()
class TestProbPlotRandomNormalMinimal(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data)
self.line = None
self.base_setup()
class TestProbPlotRandomNormalWithFit(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, fit=True)
self.line = 'q'
self.base_setup()
class TestProbPlotRandomNormalLocScale(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, loc=8.25, scale=3.25)
self.line = '45'
self.base_setup()
class TestTopLevel(object):
def setup(self):
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.res = self.mod_fit.resid
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
fig = sm.qqplot(self.res, line='r')
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_ProbPlotObjects(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with `ProbPlot` instances
fig = sm.qqplot_2samples(self.prbplt, self.other_prbplot,
line=line)
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_arrays(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with arrays
fig = sm.qqplot_2samples(self.res, self.other_array, line=line)
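# --- Illustrative sketch (editorial addition, not part of the test module) ---
# Stand-alone use of the ProbPlot API exercised by the tests above.
def _demo_probplot_usage():
    data = np.random.normal(loc=8.25, scale=3.25, size=37)
    pp = sm.ProbPlot(data, fit=True)
    if have_matplotlib:
        fig = pp.qqplot(line='45')
        plt.close(fig)
    return pp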
| bsd-3-clause |
lqhuang/SAXS-tools | scripts/plotDifferenceAnalysis.py | 1 | 9621 | import os.path
import argparse
from matplotlib import pyplot as plt
from DifferenceAnalysis import DifferenceAnalysis
from utils import print_arguments, str2bool
def plot_DifferenceAnalysis(root_directory,
log_intensity=True,
save_figures=True,
fig_format='png',
legend_loc='left',
figures_directory=None,
display=False,
baseline_index=1,
smooth=False,
scale=False,
scale_qmin=0.0,
scale_qmax=-1.0,
crop=False,
crop_qmin=0.0,
crop_qmax=-1.0,
dash_line_index=(None, )):
# read curves
file_location = os.path.join(root_directory, 'Subtracted')
try:
seq_obj = DifferenceAnalysis.from_subtracted_dats(
os.path.join(file_location, 'S_*.dat'),
smooth=smooth,
scale=scale,
ref_dat=None,
scale_qmin=scale_qmin,
scale_qmax=scale_qmax)
except FileNotFoundError:
print(
            'Warning: did not find subtracted curves with the S_ prefix. '
            'Trying to read subtracted curves with other file names.'
)
seq_obj = DifferenceAnalysis.from_subtracted_dats(
os.path.join(file_location, '*.dat'),
smooth=smooth,
scale=scale,
ref_dat=None,
scale_qmin=scale_qmin,
scale_qmax=scale_qmax)
kwargs = {
'display': display,
'save': save_figures,
'directory': figures_directory,
'legend_loc': legend_loc,
'dash_line_index': dash_line_index
}
# save figures
if not figures_directory:
figures_directory = os.path.join(root_directory, 'Figures')
if not os.path.exists(figures_directory):
os.makedirs(figures_directory)
exp_prefix = os.path.basename(root_directory)
# general
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 5))
seq_obj.plot_profiles(
log_intensity=log_intensity, axes=axes[0, 0], **kwargs)
seq_obj.plot_analysis('guinier', axes=axes[0, 1], **kwargs)
seq_obj.plot_analysis('kratky', axes=axes[1, 0], **kwargs)
seq_obj.plot_analysis('porod', axes=axes[1, 1], **kwargs)
lgd = fig.legend(
axes[0, 0].get_lines(),
seq_obj.data_dict_label(),
loc='center left',
bbox_to_anchor=(0.95, 0.5),
frameon=False)
# fig.subplots_adjust()
fig.tight_layout()
fig_path = os.path.join(
figures_directory, exp_prefix + '_saxs_general_analysis.' + fig_format)
fig.savefig(
fig_path,
dpi=seq_obj.DPI,
bbox_extra_artists=(lgd, ),
bbox_inches='tight')
if display:
plt.show()
# difference
# fig, axes = plt.subplots(nrows=2, ncols=1, sharex=True)
# seq_obj.plot_difference('relative', axes=axes[0],
# baseline_index=baseline_index, **kwargs)
# seq_obj.plot_difference('absolute', axes=axes[1],
# baseline_index=baseline_index, **kwargs)
# axes[0].set_title(None)
# axes[1].set_title([])
# lgd = fig.legend(axes[0].get_lines(), seq_obj.data_dict_label(),
# loc='center left', bbox_to_anchor=(0.95, 0.5), frameon=False)
# fig_path = os.path.join(figures_directory, exp_prefix+'_saxs_difference_analysis.'+fig_format)
# if display:
# plt.show()
# single analysis
seq_obj.plot_profiles(
log_intensity=True,
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
filename=exp_prefix + '_saxs_profiles_log_scale.' + fig_format,
**kwargs)
seq_obj.plot_profiles(
log_intensity=False,
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
filename=exp_prefix + '_saxs_profiles.' + fig_format,
**kwargs)
# seq_obj.plot_analysis('guinier',
# filename=exp_prefix+'_saxs_guinier_analysis.'+fig_format,
# **kwargs)
seq_obj.plot_analysis(
'kratky',
filename=exp_prefix + '_saxs_kratky_analysis.' + fig_format,
**kwargs)
# seq_obj.plot_analysis('porod',
# filename=exp_prefix+'_saxs_porod_analysis.'+fig_format,
# **kwargs)
seq_obj.plot_difference(
'relative',
baseline_index=baseline_index,
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
filename=exp_prefix + '_relative_ratio.' + fig_format,
**kwargs)
seq_obj.plot_difference(
'absolute',
baseline_index=baseline_index,
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
filename=exp_prefix + '_absolute_diff.' + fig_format,
**kwargs)
seq_obj.plot_error(
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
filename=exp_prefix + '_saxs_error_profiles.' + fig_format,
**kwargs)
seq_obj.plot_error_difference(
'relative',
baseline_index=baseline_index,
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
filename=exp_prefix + '_error_relative_diff.' + fig_format,
**kwargs)
plt.close('all')
try:
seq_obj.plot_pair_distribution(
output_dir=os.path.join(root_directory, 'GNOM'),
filename=exp_prefix + '_pair_distribution.' + fig_format,
**kwargs)
except Exception as error:
print('Exception Information:', error.__doc__)
raise (error)
finally:
plt.close('all')
try:
seq_obj.plot_guinier_fitting(
display=display,
save=True,
fig_format=fig_format,
directory=os.path.join(figures_directory, 'guinier_fitting'))
except Exception as error:
print('Exception Information:', error.__doc__)
raise (error)
finally:
plt.close('all')
def main():
# create an argument parser
    parser = argparse.ArgumentParser(
        description='Plot difference analysis figures for subtracted SAXS curves.')
parser.add_argument(
'-r', '--root_directory', help='Root directory for EXPERIMENTS data')
parser.add_argument(
'-f',
'--figures_directory',
help=
'Figures directory in root directory for Difference Analysis (default=Figures)',
type=str,
default='Figures')
parser.add_argument(
'--format',
help='Format of figures to save (default=png).',
type=str,
default='png')
parser.add_argument(
'--display',
help='Display figures or not (default=False).',
type=str2bool,
default=False)
parser.add_argument(
'--log_intensity',
        help='Plot profiles with log intensity or not (default=True).',
type=str2bool,
default=True)
parser.add_argument(
'--baseline_index',
help='Index for baseline starts from 1 (default=1)',
type=int,
default=1)
parser.add_argument(
'--smooth',
help='Smooth curves by savgol filter (default=True)',
type=str2bool,
default=True)
parser.add_argument(
'--scale',
help='Whether to scale curves (default=False)',
type=str2bool,
default=False)
parser.add_argument(
'--scale_qmin', help='min q for scaling', type=float, default=0.0)
parser.add_argument(
'--scale_qmax', help='max q for scaling', type=float, default=-1.0)
parser.add_argument(
'--crop',
help='Whether to crop curves (default=True)',
type=str2bool,
default=True)
parser.add_argument(
'--crop_qmin', help='min q for cropping', type=float, default=0.0)
parser.add_argument(
'--crop_qmax', help='max q for cropping', type=float, default=-1.0)
parser.add_argument(
'--dash_line_index',
help='Index for dash line starts from 1, eg: 1,2,3. (default=None)',
type=str,
default=None)
# parse arguments
args = parser.parse_args()
print_arguments(args.__dict__)
root_directory = os.path.realpath(args.root_directory)
figures_directory = os.path.join(root_directory, args.figures_directory)
fig_format = args.format
display = args.display
log_intensity = args.log_intensity
smooth = args.smooth
baseline_index = args.baseline_index
crop = args.crop
crop_qmin = args.crop_qmin
crop_qmax = args.crop_qmax
scale = args.scale
scale_qmin = args.scale_qmin
scale_qmax = args.scale_qmax
try:
dash_line_index = args.dash_line_index.split(',')
dash_line_index = [int(idx) for idx in dash_line_index]
except AttributeError:
dash_line_index = (None, )
# run
plot_DifferenceAnalysis(
root_directory,
log_intensity=log_intensity,
display=display,
save_figures=True,
fig_format=fig_format,
figures_directory=figures_directory,
baseline_index=baseline_index,
smooth=smooth,
scale=scale,
scale_qmin=scale_qmin,
scale_qmax=scale_qmax,
crop=crop,
crop_qmin=crop_qmin,
crop_qmax=crop_qmax,
dash_line_index=dash_line_index)
if __name__ == '__main__':
main()
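# --- Illustrative usage (editorial addition; the path below is hypothetical) ---
# The script is normally driven from the command line, for example:
#   python plotDifferenceAnalysis.py -r /data/saxs/EXPERIMENT_01 \
#       --baseline_index 1 --crop True --crop_qmin 0.02 --crop_qmax 0.25
# It expects subtracted curves under <root>/Subtracted and writes figures to
# <root>/Figures by default.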
| gpl-3.0 |
beni55/dipy | dipy/tests/test_scripts.py | 9 | 4292 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Test scripts
Run scripts and check outputs
"""
from __future__ import division, print_function, absolute_import
import os
import shutil
from os.path import (dirname, join as pjoin, abspath)
from nose.tools import assert_true, assert_false, assert_equal
import numpy.testing as nt
import nibabel as nib
from nibabel.tmpdirs import InTemporaryDirectory
from dipy.data import get_data
# Quickbundles command-line requires matplotlib:
try:
import matplotlib
no_mpl = False
except ImportError:
no_mpl = True
from .scriptrunner import ScriptRunner
runner = ScriptRunner(
script_sdir = 'bin',
debug_print_var = 'NIPY_DEBUG_PRINT')
run_command = runner.run_command
DATA_PATH = abspath(pjoin(dirname(__file__), 'data'))
def test_dipy_peak_extraction():
# test dipy_peak_extraction script
cmd = 'dipy_peak_extraction'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_fit_tensor():
# test dipy_fit_tensor script
cmd = 'dipy_fit_tensor'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def test_dipy_sh_estimate():
# test dipy_sh_estimate script
cmd = 'dipy_sh_estimate'
code, stdout, stderr = run_command(cmd, check_code=False)
assert_equal(code, 2)
def assert_image_shape_affine(filename, shape, affine):
assert_true(os.path.isfile(filename))
image = nib.load(filename)
assert_equal(image.shape, shape)
nt.assert_array_almost_equal(image.get_affine(), affine)
def test_dipy_fit_tensor_again():
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
with InTemporaryDirectory():
dwi, bval, bvec = get_data("small_25")
# Copy data to tmp directory
shutil.copyfile(dwi, "small_25.nii.gz")
shutil.copyfile(bval, "small_25.bval")
shutil.copyfile(bvec, "small_25.bvec")
# Call script
cmd = ["dipy_fit_tensor", "--save-tensor", "--mask=none", "small_25.nii.gz"]
out = run_command(cmd)
assert_equal(out[0], 0)
# Get expected values
img = nib.load("small_25.nii.gz")
affine = img.get_affine()
shape = img.shape[:-1]
# Check expected outputs
assert_image_shape_affine("small_25_fa.nii.gz", shape, affine)
assert_image_shape_affine("small_25_t2di.nii.gz", shape, affine)
assert_image_shape_affine("small_25_dirFA.nii.gz", shape, affine)
assert_image_shape_affine("small_25_ad.nii.gz", shape, affine)
assert_image_shape_affine("small_25_md.nii.gz", shape, affine)
assert_image_shape_affine("small_25_rd.nii.gz", shape, affine)
# small_25_tensor saves the tensor as a symmetric matrix following
# the nifti standard.
ten_shape = shape + (1, 6)
assert_image_shape_affine("small_25_tensor.nii.gz", ten_shape,
affine)
@nt.dec.skipif(no_mpl)
def test_qb_commandline():
with InTemporaryDirectory():
tracks_file = get_data('fornix')
cmd = ["dipy_quickbundles", tracks_file, '--pkl_file', 'mypickle.pkl',
'--out_file', 'tracks300.trk']
out = run_command(cmd)
assert_equal(out[0], 0)
| bsd-3-clause |
elfi-dev/elfi | elfi/methods/results.py | 1 | 16936 | """Containers for results from inference."""
import io
import itertools
import logging
import os
import string
import sys
from collections import OrderedDict
import numpy as np
from matplotlib import pyplot as plt
import elfi.visualization.visualization as vis
from elfi.methods.utils import numpy_to_python_type, sample_object_to_dict
logger = logging.getLogger(__name__)
class ParameterInferenceResult:
"""Base class for results."""
def __init__(self, method_name, outputs, parameter_names, **kwargs):
"""Initialize result.
Parameters
----------
method_name : string
Name of inference method.
outputs : dict
Dictionary with outputs from the nodes, e.g. samples.
parameter_names : list
Names of the parameter nodes
**kwargs
Any other information from the inference algorithm, usually from its state.
"""
self.method_name = method_name
self.outputs = outputs.copy()
self.parameter_names = parameter_names
self.meta = kwargs
@property
def is_multivariate(self):
"""Check whether the result contains multivariate parameters."""
for p in self.parameter_names:
if self.outputs[p].ndim > 1:
return True
return False
class OptimizationResult(ParameterInferenceResult):
"""Base class for results from optimization."""
def __init__(self, x_min, **kwargs):
"""Initialize result.
Parameters
----------
x_min
The optimized parameters
**kwargs
See `ParameterInferenceResult`
"""
super(OptimizationResult, self).__init__(**kwargs)
self.x_min = x_min
class Sample(ParameterInferenceResult):
"""Sampling results from inference methods."""
def __init__(self,
method_name,
outputs,
parameter_names,
discrepancy_name=None,
weights=None,
**kwargs):
"""Initialize result.
Parameters
----------
method_name : string
Name of inference method.
outputs : dict
Dictionary with outputs from the nodes, e.g. samples.
parameter_names : list
Names of the parameter nodes
discrepancy_name : string, optional
Name of the discrepancy in outputs.
weights : array_like
**kwargs
Other meta information for the result
"""
super(Sample, self).__init__(
method_name=method_name, outputs=outputs, parameter_names=parameter_names, **kwargs)
self.samples = OrderedDict()
for n in self.parameter_names:
self.samples[n] = self.outputs[n]
self.discrepancy_name = discrepancy_name
self.weights = weights
def __getattr__(self, item):
"""Allow more convenient access to items under self.meta."""
if item in self.meta.keys():
return self.meta[item]
else:
raise AttributeError("No attribute '{}' in this sample".format(item))
def __dir__(self):
"""Allow autocompletion for items under self.meta.
http://stackoverflow.com/questions/13603088/python-dynamic-help-and-autocomplete-generation
"""
items = dir(type(self)) + list(self.__dict__.keys())
items.extend(self.meta.keys())
return items
@property
def n_samples(self):
"""Return the number of samples."""
return len(self.outputs[self.parameter_names[0]])
@property
def dim(self):
"""Return the number of parameters."""
return len(self.parameter_names)
@property
def discrepancies(self):
"""Return the discrepancy values."""
return None if self.discrepancy_name is None else \
self.outputs[self.discrepancy_name]
@property
def samples_array(self):
"""Return the samples as an array.
The columns are in the same order as in self.parameter_names.
Returns
-------
        np.ndarray
"""
return np.column_stack(tuple(self.samples.values()))
def __str__(self):
"""Return a summary of results as a string."""
# create a buffer for capturing the output from summary's print statement
stdout0 = sys.stdout
buffer = io.StringIO()
sys.stdout = buffer
self.summary()
sys.stdout = stdout0 # revert to original stdout
return buffer.getvalue()
def __repr__(self):
"""Return a summary of results as a string."""
return self.__str__()
def summary(self):
"""Print a verbose summary of contained results."""
# TODO: include __str__ of Inference Task, seed?
desc = "Method: {}\nNumber of samples: {}\n" \
.format(self.method_name, self.n_samples)
if hasattr(self, 'n_sim'):
desc += "Number of simulations: {}\n".format(self.n_sim)
if hasattr(self, 'threshold'):
desc += "Threshold: {:.3g}\n".format(self.threshold)
print(desc, end='')
try:
self.sample_means_summary()
except TypeError:
pass
def sample_means_summary(self):
"""Print a representation of sample means."""
s = "Sample means: "
s += ', '.join(["{}: {:.3g}".format(k, v) for k, v in self.sample_means.items()])
print(s)
@property
def sample_means(self):
"""Evaluate weighted averages of sampled parameters.
Returns
-------
OrderedDict
"""
return OrderedDict([(k, np.average(v, axis=0, weights=self.weights))
for k, v in self.samples.items()])
@property
def sample_means_array(self):
"""Evaluate weighted averages of sampled parameters.
Returns
-------
np.array
"""
return np.array(list(self.sample_means.values()))
def __getstate__(self):
"""Says to pickle the exact objects to pickle."""
return self.meta, self.__dict__
def __setstate__(self, state):
"""Says to pickle which objects to unpickle."""
self.meta, self.__dict__ = state
def save(self, fname=None):
"""Save samples in csv, json or pickle file formats.
        Note: csv saves only the samples; json saves the object's dictionary
        except the `outputs` key; pickle saves the whole object.
Parameters
----------
fname : str, required
File name to be saved. The type is inferred from extension ('csv', 'json' or 'pkl').
"""
import csv
import json
import pickle
kind = os.path.splitext(fname)[1][1:]
if kind == 'csv':
with open(fname, 'w', newline='') as f:
w = csv.writer(f)
w.writerow(self.samples.keys())
w.writerows(itertools.zip_longest(*self.samples.values(), fillvalue=''))
elif kind == 'json':
with open(fname, 'w') as f:
data = OrderedDict()
data['n_samples'] = self.n_samples
data['discrepancies'] = self.discrepancies
data['dim'] = self.dim
# populations key exists in SMC-ABC sampler and contains the history of all
# inferences with different number of simulations and thresholds
populations = 'populations'
if populations in self.__dict__:
# setting populations in the following form:
# data = {'populations': {'A': dict(), 'B': dict()}, ...}
# this helps to save all kind of populations
pop_num = string.ascii_letters.upper()[:len(self.__dict__[populations])]
data[populations] = OrderedDict()
for n, elem in enumerate(self.__dict__[populations]):
data[populations][pop_num[n]] = OrderedDict()
sample_object_to_dict(data[populations][pop_num[n]], elem)
# convert numpy types into python types in populations key
for key, val in data[populations].items():
numpy_to_python_type(val)
# skip populations because it was processed previously
sample_object_to_dict(data, self, skip='populations')
# convert numpy types into python types
numpy_to_python_type(data)
js = json.dumps(data)
f.write(js)
elif kind == 'pkl':
with open(fname, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
else:
print("Wrong file type format. Please use 'csv', 'json' or 'pkl'.")
def plot_marginals(self, selector=None, bins=20, axes=None, **kwargs):
"""Plot marginal distributions for parameters.
Supports only univariate distributions.
Parameters
----------
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
bins : int, optional
Number of bins in histograms.
axes : one or an iterable of plt.Axes, optional
Returns
-------
axes : np.array of plt.Axes
"""
if self.is_multivariate:
print("Plotting multivariate distributions is unsupported.")
else:
return vis.plot_marginals(self.samples, selector, bins, axes, **kwargs)
def plot_pairs(self, selector=None, bins=20, axes=None, **kwargs):
"""Plot pairwise relationships as a matrix with marginals on the diagonal.
The y-axis of marginal histograms are scaled.
Supports only univariate distributions.
Parameters
----------
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
bins : int, optional
Number of bins in histograms.
axes : one or an iterable of plt.Axes, optional
Returns
-------
axes : np.array of plt.Axes
"""
if self.is_multivariate:
print("Plotting multivariate distributions is unsupported.")
else:
return vis.plot_pairs(self.samples, selector, bins, axes, **kwargs)
class SmcSample(Sample):
"""Container for results from SMC-ABC."""
def __init__(self, method_name, outputs, parameter_names, populations, *args, **kwargs):
"""Initialize result.
Parameters
----------
method_name : str
outputs : dict
parameter_names : list
populations : list[Sample]
List of Sample objects
args
kwargs
"""
super(SmcSample, self).__init__(
method_name=method_name,
outputs=outputs,
parameter_names=parameter_names,
*args,
**kwargs)
self.populations = populations
if self.weights is None:
raise ValueError("No weights provided for the sample")
@property
def n_populations(self):
"""Return the number of populations."""
return len(self.populations)
def summary(self, all=False):
"""Print a verbose summary of contained results.
Parameters
----------
all : bool, optional
Whether to print the summary for all populations separately,
or just the final population (default).
"""
super(SmcSample, self).summary()
if all:
for i, pop in enumerate(self.populations):
print('\nPopulation {}:'.format(i))
pop.summary()
def sample_means_summary(self, all=False):
"""Print a representation of sample means.
Parameters
----------
all : bool, optional
Whether to print the means for all populations separately,
or just the final population (default).
"""
if all is False:
super(SmcSample, self).sample_means_summary()
return
out = ''
for i, pop in enumerate(self.populations):
out += "Sample means for population {}: ".format(i)
out += ', '.join(["{}: {:.3g}".format(k, v) for k, v in pop.sample_means.items()])
out += '\n'
print(out)
def plot_marginals(self, selector=None, bins=20, axes=None, all=False, **kwargs):
"""Plot marginal distributions for parameters for all populations.
Parameters
----------
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
bins : int, optional
Number of bins in histograms.
axes : one or an iterable of plt.Axes, optional
all : bool, optional
Plot the marginals of all populations
"""
if all is False:
super(SmcSample, self).plot_marginals()
return
fontsize = kwargs.pop('fontsize', 13)
for i, pop in enumerate(self.populations):
pop.plot_marginals(selector=selector, bins=bins, axes=axes)
plt.suptitle("Population {}".format(i), fontsize=fontsize)
def plot_pairs(self, selector=None, bins=20, axes=None, all=False, **kwargs):
"""Plot pairwise relationships as a matrix with marginals on the diagonal.
The y-axis of marginal histograms are scaled.
Parameters
----------
selector : iterable of ints or strings, optional
Indices or keys to use from samples. Default to all.
bins : int, optional
Number of bins in histograms.
axes : one or an iterable of plt.Axes, optional
all : bool, optional
Plot for all populations
"""
if all is False:
super(SmcSample, self).plot_marginals()
return
fontsize = kwargs.pop('fontsize', 13)
for i, pop in enumerate(self.populations):
pop.plot_pairs(selector=selector, bins=bins, axes=axes)
plt.suptitle("Population {}".format(i), fontsize=fontsize)
class BolfiSample(Sample):
"""Container for results from BOLFI."""
def __init__(self, method_name, chains, parameter_names, warmup, **kwargs):
"""Initialize result.
Parameters
----------
method_name : string
Name of inference method.
chains : np.array
Chains from sampling, warmup included. Shape: (n_chains, n_samples, n_parameters).
parameter_names : list : list of strings
List of names in the outputs dict that refer to model parameters.
warmup : int
Number of warmup iterations in chains.
"""
chains = chains.copy()
shape = chains.shape
n_chains = shape[0]
warmed_up = chains[:, warmup:, :]
concatenated = warmed_up.reshape((-1,) + shape[2:])
outputs = dict(zip(parameter_names, concatenated.T))
super(BolfiSample, self).__init__(
method_name=method_name,
outputs=outputs,
parameter_names=parameter_names,
chains=chains,
n_chains=n_chains,
warmup=warmup,
**kwargs)
def plot_traces(self, selector=None, axes=None, **kwargs):
"""Plot MCMC traces."""
return vis.plot_traces(self, selector, axes, **kwargs)
class RomcSample(Sample):
"""Container for results from ROMC."""
def __init__(self, method_name,
outputs,
parameter_names,
discrepancy_name,
weights,
**kwargs):
"""Class constructor.
Parameters
----------
method_name: string
Name of the inference method
outputs: Dict
Dict where key is the parameter name and value are the samples
parameter_names: List[string]
List of the parameter names
discrepancy_name: string
name of the output (=distance) node
weights: np.ndarray
the weights of the samples
kwargs
"""
super(RomcSample, self).__init__(
method_name, outputs, parameter_names,
            discrepancy_name=discrepancy_name, weights=weights, **kwargs)
def samples_cov(self):
"""Print the empirical covariance matrix.
Returns
-------
np.ndarray (D,D)
the covariance matrix
"""
samples = self.samples_array
weights = self.weights
cov_mat = np.cov(samples, rowvar=False, aweights=weights)
return cov_mat
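if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): build a
    # Sample from synthetic arrays and exercise a few of the accessors
    # defined above. The method name, parameter names and values are made up.
    rng = np.random.RandomState(0)
    demo = Sample(method_name='Rejection',
                  outputs={'mu': rng.randn(100),
                           'sigma': rng.rand(100),
                           'd': rng.rand(100)},
                  parameter_names=['mu', 'sigma'],
                  discrepancy_name='d',
                  n_sim=1000)
    demo.summary()                    # prints method, sample count and means
    print(demo.samples_array.shape)   # (100, 2), columns follow parameter_names
    print(demo.sample_means)          # OrderedDict of (unweighted) means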
| bsd-3-clause |
centic9/subversion-ppa | tools/dev/wc-ng/graph-data.py | 7 | 1757 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import csv
import sys
min_rev = 35000
data_reader = csv.reader(open('data.csv'))
data = []
for row in data_reader:
row = row[:-1]
if row[0] == 'Revision':
data.append(row)
continue
if int(row[0]) < min_rev:
continue
for i, x in enumerate(row):
if i <= 1:
row[i] = int(row[i])
else:
row[i] = int(row[i-1]) + int(row[i])
data.append(row)
x = [d[0] for d in data[1:]]
data = [d[1:] for d in data]
y = zip(*data)
l = []
for i, foo in enumerate(y):
    ln, = plt.plot(x, foo[1:], linewidth=1)
l.append(ln)
plt.figlegend(l, data[0], 'lower left')
plt.fill_between(x, 0, y[0][1:], facecolor=l[0].get_color())
#for i in range(0, len(y)-1):
# plt.fill_between(x, y[i][1:], y[i+1][1:])
plt.xlabel('Revision')
plt.ylabel('Symbol Count')
plt.show()
png = open('chart2.png', 'wb')
plt.savefig(png)
| apache-2.0 |
RalphBariz/RalphsDotNet | Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/scipy/signal/ltisys.py | 53 | 23848 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
# pad numerator to have same number of columns has denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
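# Example (illustrative, not part of the original source): the transfer
# function H(s) = 1 / (s**2 + 2*s + 1) in controller-canonical state space.
#
#     A, B, C, D = tf2ss([1.0], [1.0, 2.0, 1.0])
#
# should give A = [[-2., -1.], [1., 0.]], B = [[1.], [0.]], C = [[0., 1.]]
# and D = [0.], the companion-form realization of this system.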
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
MB, NB = MA, ND
        B = zeros((MB, NB))
if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
MD, ND = MC, NB
        D = zeros((MD, ND))
if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
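# Example (illustrative, not part of the original source): three equivalent
# ways of constructing the same first-order lag with the lti class.
#
#     sys_tf  = lti([1.0], [1.0, 1.0])                        # (num, den)
#     sys_zpk = lti([], [-1.0], 1.0)                          # (zeros, poles, gain)
#     sys_ss  = lti(sys_tf.A, sys_tf.B, sys_tf.C, sys_tf.D)   # (A, B, C, D)
#
# Each instance exposes .num/.den, .zeros/.poles/.gain and .A/.B/.C/.D views
# of the same underlying system.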
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
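# Example (illustrative, not part of the original source): step response of
# the first-order lag H(s) = 1 / (s + 1) via the ODE-solver based lsim2.
#
#     t = linspace(0, 5.0, 101)
#     u = ones(t.shape)
#     tout, y, x = lsim2(([1.0], [1.0, 1.0]), U=u, T=t)
#
# The output y should follow 1 - exp(-t) to within the solver tolerance.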
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
    X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
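if __name__ == '__main__':
    # Illustrative demo (not part of the original source): step response of
    # H(s) = 1 / (s**2 + 3*s + 2) = 1 / ((s + 1)*(s + 2)).  The response
    # should settle near the DC gain H(0) = 0.5.
    t_demo, y_demo = step(([1.0], [1.0, 3.0, 2.0]))
    print("final step-response value: %g" % y_demo[-1])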
| gpl-3.0 |
INGEOTEC/microTC | microtc/tests/test_utils.py | 1 | 2697 | # author: Eric S. Tellez
def test_params():
from microtc.params import ParameterSelection
import numpy as np
from numpy.random import random
sel = ParameterSelection()
def fake_score(conf_code):
conf = conf_code[0]
conf['_score'] = random()
conf['_time'] = 1.0
return conf
sel.search(fake_score, bsize=64)
def test_read_data_labels():
import os
from microtc.utils import read_data_labels
filename = os.path.join(os.path.dirname(__file__), "text.json")
read_data_labels(filename)
def test_wrapper_score():
from microtc.scorewrapper import ScoreKFoldWrapper
from sklearn.metrics import f1_score
import numpy as np
np.random.seed(0)
y = np.random.randint(3, size=100).astype(np.str)
hy = np.random.randint(3, size=100)
w = ScoreKFoldWrapper([], y, score='avgf1:0:2', nfolds=10)
conf = {}
w.compute_score(conf, hy)
f1 = f1_score(y.astype(np.int), hy, average=None)
assert conf['_accuracy'] == (y.astype(np.int) == hy).mean()
print(y)
print(conf['_avgf1:0:2'], (f1[0] + f1[2]) / 2.)
assert conf['_avgf1:0:2'] == (f1[0] + f1[2]) / 2.
def test_counter():
from microtc.utils import Counter, save_model, load_model
import os
c = Counter()
c.update([1, 2, 3, 1])
c.update([3])
assert c[1] == 2
print(c.update_calls)
assert c.update_calls == 2
save_model(c, "t.voc")
cc = load_model("t.voc")
os.unlink("t.voc")
print(cc.update_calls, "**")
assert cc.update_calls == 2
def test_counter_add():
from microtc.utils import Counter
c1 = Counter()
c1.update(range(10))
c2 = Counter()
c2.update(range(5, 15))
r = c1 + c2
print(r)
assert isinstance(r, Counter)
for i in range(5):
assert r[i] == 1
for i in range(5, 10):
assert r[i] == 2
for i in range(10, 15):
assert r[i] == 1
assert r.update_calls == 2
def test_counter_sub():
from microtc.utils import Counter
c1 = Counter()
c1.update(range(10))
c2 = Counter()
c2.update(range(5, 15))
r = c1 + c2
re = r - c1
print(re)
assert isinstance(re, Counter)
for k, v in re.items():
assert c2[k] == v
for k, v in c2.items():
assert re[k] == v
assert re.update_calls == 1
def test_counter_json():
from microtc.utils import Counter
c1 = Counter()
c1.update(list(map(str, range(10))))
print(c1)
js = c1.tojson()
print(js)
c2 = Counter.fromjson(js)
assert c1.update_calls == c2.update_calls
print(c2)
for x, v in c1.items():
print(x, v, c2[x])
        assert x in c2 and v == c2[x]
| apache-2.0 |
cojacoo/testcases_echoRD | gen_test_col1.py | 1 | 4227 | import numpy as np
import pandas as pd
import scipy as sp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os, sys
try:
import cPickle as pickle
except:
import pickle
#connect echoRD Tools
pathdir='../echoRD' #path to echoRD
lib_path = os.path.abspath(pathdir)
#sys.path.append(lib_path)
sys.path.append('/home/ka/ka_iwg/ka_oj4748/echoRD/echoRD')
import vG_conv as vG
from hydro_tools import plotparticles_t,hydroprofile,plotparticles_column
# Prepare echoRD
#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr,mc,mcp,pdyn,cinf,vG]=rE.loadconnect(pathdir='../',mcinif='mcini_g63',experimental=True)
mc = mcp.mcpick_out(mc,'g63.pickle')
runname='gen_test_col1'
mc.advectref='Shipitalo'
mc.soilmatrix=pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1-1/mc.soilmatrix.n,0.1)
precTS=pd.read_csv(mc.precf, sep=',',skiprows=3)
precTS.tstart-=340
precTS.tend-=340
precTS.intense=2.*0.063*60./1000.# intensity in m3/s
#use modified routines for binned retention definitions
#mc.part_sizefac=500
mc.gridcellA=abs(mc.mgrid.vertfac*mc.mgrid.latfac)
mc.particleA=abs(mc.gridcellA.values)/(2*mc.part_sizefac) #assume average ks at about 0.5 as reference of particle size
mc.particleD=2.*np.sqrt(mc.particleA/np.pi)
mc.particleV=3./4.*np.pi*(mc.particleD/2.)**3.
mc.particleV/=np.sqrt(abs(mc.gridcellA.values)) #assume grid size as 3rd dimension
mc.particleD/=np.sqrt(abs(mc.gridcellA.values))
#for column:
total_volume=np.pi*0.5**3
mc.particleV=total_volume/(mc.mgrid.vertgrid[0]*mc.mgrid.latgrid[0]*(2*mc.part_sizefac))
mc.particlemass=dr.waterdensity(np.array(20),np.array(-9999))*mc.particleV #assume 20C as reference for particle mass
mc=dr.ini_bins(mc)
mc=dr.mc_diffs(mc,np.max(np.max(mc.mxbin)))
[mc,particles,npart]=dr.particle_setup(mc)
#define bin assignment mode for infiltration particles
mc.LTEdef='instant'#'ks' #'instant' #'random'
mc.LTEmemory=mc.soilgrid.ravel()*0.
#new reference
mc.maccon=np.where(mc.macconnect.ravel()>0)[0] #index of all connected cells
mc.md_macdepth=np.abs(mc.md_macdepth)
mc.prects='column2'
mc.colref=False
#theta=mc.zgrid[:,1]*0.+0.273
#[mc,particles,npart]=rE.particle_setup_obs(theta,mc,vG,dr,pdyn)
[thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
#[A,B]=plotparticles_t(particles,thS/100.,mc,vG,store=True)
# Run Model
mc.LTEpercentile=70 #new parameter
t_end=24.*3600.
saveDT=True
#1: MDA
#2: MED
#3: rand
infiltmeth='MDA'
#3: RWdiff
#4: Ediss
#exfiltmeth='RWdiff'
exfiltmeth='Ediss'
#5: film_uconst
#6: dynamic u
film=True
#7: maccoat1
#8: maccoat10
#9: maccoat100
macscale=1. #scale the macropore coating
clogswitch=False
infiltscale=False
#mc.dt=0.11
#mc.splitfac=5
#pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
#import profile
#%prun -D diff_pd_prof.prof pdyn.part_diffusion_binned_pd(particles,npart,thS,mc)
wdir='/beegfs/work/ka_oj4748/gen_tests'
drained=pd.DataFrame(np.array([]))
leftover=0
output=60. #mind to set also in TXstore.index definition
dummy=np.floor(t_end/output)
t=0.
ix=0
TSstore=np.zeros((int(dummy),mc.mgrid.cells[0],2))
try:
#unpickle:
with open(''.join([wdir,'/results/Z',runname,'_Mstat.pick']),'rb') as handle:
pickle_l = pickle.load(handle)
dummyx = pickle.loads(pickle_l)
particles = pickle.loads(dummyx[0])
[leftover,drained,t,TSstore,ix] = pickle.loads(dummyx[1])
ix+=1
print('resuming into stored run at t='+str(t)+'...')
except:
print('starting new run...')
#loop through plot cycles
for i in np.arange(dummy.astype(int))[ix:]:
plotparticles_column(particles,mc,pdyn,vG,runname,t,i,saving=True,relative=False,wdir=wdir)
[particles,npart,thS,leftover,drained,t]=rE.CAOSpy_rundx1(i*output,(i+1)*output,mc,pdyn,cinf,precTS,particles,leftover,drained,6.,splitfac=4,prec_2D=False,maccoat=macscale,saveDT=saveDT,clogswitch=clogswitch,infilt_method=infiltmeth,exfilt_method=exfiltmeth,film=film,infiltscale=infiltscale)
TSstore[i,:,:]=rE.part_store(particles,mc)
#if i/5.==np.round(i/5.):
with open(''.join([wdir,'/results/X',runname,'_Mstat.pick']),'wb') as handle:
pickle.dump(pickle.dumps([leftover,drained,t,TSstore,i]), handle, protocol=2)
| gpl-3.0 |
cponce512/FLiER_Test_Suite | Code/FLiER_Substation.py | 1 | 27041 | """This file is part of the FLiER Test Suite.
The FLiER Test Suite is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
The FLiER Test Suite is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the FLiER Test Suite. If not, see
<http://www.gnu.org/licenses/>.
Copyright Colin Ponce 2015.
"""
from PowerNetwork import PowerNetwork
from Bus import Ring_Substation
from numpy import dot
from Power_Utils import powerflow, powerflow_jacob
from Utils import precision_lstsq
import numpy as np
import scipy.sparse.linalg as spsl
import scipy.sparse as sps
import matplotlib.pyplot as plt
import time
class FLiER_Substation(object):
def __init__(self, powernet, pre_event_volts, pmus,
eager_construction = True, verbose=False):
self.v = pre_event_volts
self.powernet = powernet
self.n = powernet.nb
self.Y = powernet.Y
self.verbose = verbose
self.pmus = pmus
        self._en = None
        self._eind2node = None
self._num_slave_nodes = None
self._substations = None
self._rev = None
self._ev = None
self._eindr2reindr = None
self._reindr2eindr = None
self._extY = None
self._dHdv = None
self._C1_columns = None
self._C2_columns = None
self._C = None
self._A = None
self._Afac = None
self._E = None
self._EAinv = None
self._eb = None
self._lamed = None
if eager_construction:
x = self.substations
x = self.num_slave_nodes
x = self.rev
x = self.ev
x = self.eindr2reindr
x = self.reindr2eindr
x = self.extY
x = self.dHdv
x = self.C
x = self.A
x = self.Afac
x = self.E
x = self.EAinv
x = self.eb
x = self.lamed
@property
def substations(self):
"""A list of Ring_Substation objects in the network."""
if self._substations is None:
substations = [0]*self.n
# Create the set of Ring_Substations
for bus in self.powernet.buses:
substations[bus.index] = Ring_Substation(bus)
assert not any([sub == 0 for sub in substations])
assert len(substations) == self.n
# Connect Ring_Substations together.
for branch in self.powernet.branches.values():
sub1 = substations[branch.buses[0].index]
sub2 = substations[branch.buses[1].index]
Ring_Substation.connect_nodes(sub1, sub2, branch)
# Set extended indices.
eind = 0
for sub in substations:
sub.set_extended_indices(eind)
eind += sub.num_nodes()
self._en = eind
self._substations = substations
return self._substations
@property
def en(self):
"""The 'extended' size of the network, i.e. the number of
substation nodes."""
assert self._en is not None # created in self.substations()
return self._en
@property
def num_slave_nodes(self):
"""The number of slave nodes in this network."""
if self._num_slave_nodes is None:
self._num_slave_nodes = self.en - self.n
return self._num_slave_nodes
def branch_endpoint_einds(self, branch):
"""Get the extended indices of the nodes at either
end of this branch."""
bus0ind = branch.buses[0].index
node0ind = self.substations[bus0ind].branch2node(branch).eind
bus1ind = branch.buses[1].index
node1ind = self.substations[bus1ind].branch2node(branch).eind
return node0ind, node1ind
@property
def eind2node(self):
"""A list that converts extended index into node."""
if self._eind2node is None:
self._eind2node = [0]*self.en
for sub in self.substations:
for node in sub.nodes:
self._eind2node[node.eind] = node
return self._eind2node
def set_reordering_lists(self):
"""See the doc for eind2reindr."""
self._eindr2reindr = np.zeros(2*self.en, dtype=int)
self._reindr2eindr = np.zeros(2*self.en, dtype=int)
curr_master_ind = 0
curr_slave_ind = 2*self.n
enmn = self.en - self.n
for sub in self.substations:
master_eind = sub.nodes[0].eind
self._eindr2reindr[master_eind] = curr_master_ind
self._eindr2reindr[master_eind+self.en] = curr_master_ind+self.n
self._reindr2eindr[curr_master_ind] = master_eind
self._reindr2eindr[curr_master_ind+self.n] = master_eind+self.en
curr_master_ind += 1
for node in sub.nodes[1:]:
eind = node.eind
self._eindr2reindr[eind] = curr_slave_ind
self._eindr2reindr[eind + self.en] = curr_slave_ind + enmn
self._reindr2eindr[curr_slave_ind] = eind
self._reindr2eindr[curr_slave_ind + enmn] = eind + self.en
curr_slave_ind += 1
@property
def eindr2reindr(self):
"""eind is extended index, with length en.
eindr is extended index for real formulation, which has length 2*en,
and maps every node to a pair of indices, for magnitude and phase
angle.
reind is reordered extended index, which puts master nodes first,
followed by slave nodes.
reindr is reordered extended index for real formulation, which has order
(master node magnitude variables), (master node phase angle variables),
(slave node magnitude variables), (slave node phase angle variables)"""
if self._eindr2reindr is None:
self.set_reordering_lists()
return self._eindr2reindr
@property
def reindr2eindr(self):
"""See the doc for eind2reindr. This is the inverse."""
if self._reindr2eindr is None:
self.set_reordering_lists()
return self._reindr2eindr
@property
def extY(self):
"""The extended node-wise admittance matrix. Has size en x en rather
than n x n."""
if self._extY is None:
extY = sps.lil_matrix((self.en, self.en), dtype=np.complex)
for branch in self.powernet.branches.values():
node0ind, node1ind = self.branch_endpoint_einds(branch)
edge_adm = branch.admittance
Yii = (edge_adm + 1j*branch.line_charging_susc / 2.0)
if branch.nonunity_tap:
tap = branch.tap
extY[node0ind, node0ind] += Yii / (tap*np.conj(tap))
extY[node1ind, node1ind] += Yii
extY[node0ind, node1ind] -= edge_adm / np.conj(tap)
extY[node1ind, node0ind] -= edge_adm / tap
else:
extY[node0ind, node0ind] += Yii
extY[node1ind, node1ind] += Yii
extY[node0ind, node1ind] -= edge_adm
extY[node1ind, node0ind] -= edge_adm
for sub in self.substations:
for node in sub.nodes:
extY[node.eind, node.eind] += node.shunt_adm
self._extY = extY
return self._extY
@property
def E(self):
"""A matrix that project a voltage vector onto the observable subspace.
The observable subspace is axis-aligned and associated with voltage
magnitudes and phase angles associatd with those buses that the PMUs
can observe (the PMU buses and their immediate neighbors).
"""
if self._E is None:
phase_vars = []
mag_vars = []
buses_observed = []
for p in self.pmus:
sub = self.substations[p]
if p not in buses_observed:
master_eind = sub.nodes[0].eind
phase_vars.append(self.eindr2reindr[master_eind])
mag_vars.append(self.eindr2reindr[master_eind+self.en])
buses_observed.append(p)
for node in sub.nodes[2:]:
opp_sub_ind = node.opposing_node.substation.index
if opp_sub_ind not in buses_observed:
opp_node_eind = node.opposing_node.eind
phase_vars.append(self.eindr2reindr[opp_node_eind])
mag_vars.append(self.eindr2reindr[opp_node_eind+self.en])
buses_observed.append(opp_sub_ind)
lpn = len(phase_vars) + len(mag_vars)
assert lpn == 2 * len(phase_vars)
ijinds = np.vstack([[np.arange(lpn)],
[np.hstack([phase_vars, mag_vars])]])
shape = (lpn, self.dHdv.shape[0] + self.C.shape[1])
self._E = sps.csr_matrix((np.ones(lpn), ijinds), shape=shape)
return self._E
@property
def eb(self):
"""Power injections, extended, by node."""
if self._eb is None:
eb = np.zeros(2*self.en)
for sub in self.substations:
for node in sub.nodes:
injection = node.generation - node.load
eb[node.eind] = np.real(injection)
eb[node.eind+self.en] = np.imag(injection)
self._eb = eb
return self._eb
@property
def lamed(self):
"""lamed, the Phoenician name for lambda. (As the word lambda is
reserved in Python.)"""
if self._lamed is None:
eH = powerflow(self.ev, self.extY)
resid = self.eb - eH
rresid = resid[self.reindr2eindr]
out = spsl.lsqr(self.C, rresid, atol=1e-14, btol=1e-14)
# assert out[3] < 1e-8
self._lamed = np.matrix(out[0]).T
return self._lamed
@staticmethod
def extend_vector(substations, vec, en):
"""Convert a bus-level voltage set into a node-level voltage set."""
n = len(vec) / 2
ext_vec = np.zeros(2*en, dtype=vec.dtype)
assert en == substations[-1].nodes[-1].eind+1
for sub in substations:
for node in sub.nodes:
ext_vec[node.eind] = vec[sub.index]
ext_vec[node.eind+en] = vec[sub.index+n]
return ext_vec
@staticmethod
def extend_complex_vector(substations, vec, en):
n = len(vec)
ext_vec = np.zeros(en, dtype=complex)
assert en == substations[-1].nodes[-1].eind+1
for sub in substations:
for node in sub.nodes:
ext_vec[node.eind] = vec[sub.index]
return ext_vec
@property
def ev(self):
"""Extended voltages. The voltage of each substation node in the
network, as opposed to each substation (or bus) in the network."""
if self._ev is None:
self._ev = FLiER_Substation.extend_vector(self.substations,
self.v, self.en)
return self._ev
@property
def rev(self):
"""Reordered extended voltages."""
if self._rev is None:
self._rev = self.ev[self.reindr2eindr]
return self._rev
@property
def dHdv(self):
"""Compute the reordered extended Jacobian matrix."""
if self._dHdv is None:
en = self.en
extY = self.extY
e2r = self.eindr2reindr
vM = self.ev[en:]
vTheta = self.ev[:en]
vC = vM * np.exp(1j * vTheta)
vA = np.exp(1j*vTheta)
s = powerflow(self.ev, extY)
s = s[:en] + 1j * s[en:]
ij = []
data = []
def add_element(i,j,val):
ij.append(i)
ij.append(j)
data.append(val)
for i,j in zip(*extY.nonzero()):
val = np.imag(vC[i] * np.conj(extY[i,j] * vC[j]))
add_element(e2r[i], e2r[j], val)
val = np.real(vC[i] * np.conj(extY[i,j] * vA[j]))
add_element(e2r[i], e2r[en+j], val)
val = np.real(-vC[i] * np.conj(extY[i,j] * vC[j]))
add_element(e2r[en+i], e2r[j], val)
val = np.imag(vC[i] * np.conj(extY[i,j] * vA[j]))
add_element(e2r[en+i], e2r[en+j], val)
extYvC = extY.dot(vC)
for i in xrange(en):
if s[i] == 0 and extYvC[i] == 0:
continue
add_element(e2r[ i], e2r[ i], -np.imag(s[i]))
add_element(e2r[ i], e2r[en+i],
np.real(vA[i] * np.conj(extYvC[i])))
add_element(e2r[en+i], e2r[ i], np.real(s[i]))
add_element(e2r[en+i], e2r[en+i],
np.imag(vA[i] * np.conj(extYvC[i])))
ij = np.reshape(ij, (2, len(ij)/2), order='F')
self._dHdv = sps.csr_matrix((data, ij), shape=(2*en, 2*en))
return self._dHdv
def get_C1_columns(self, bus_ind):
"""
The matrix C1 is as described in the comment for C. Every PV
and Slack node in the network is given a column in C1. This
function maps PV and slack nodes to columns of C1.
"""
if self._C1_columns is None:
self._C1_columns = [0]*self.n
curr_col = 0
for sub in self.substations:
if sub.type == 'PQ':
self._C1_columns[sub.index] = None
elif sub.type == 'PV':
self._C1_columns[sub.index] = (curr_col,)
curr_col += 1
else:
assert sub.type == 'Slack'
self._C1_columns[sub.index] = (curr_col, curr_col+1)
curr_col += 2
return self._C1_columns[bus_ind]
def get_C2_columns(self, node_eind):
"""
The matrix C2 is as described in the comment for C. Every slave
node is given a column in C2. This function maps slave node
indices to columns of C2. This is also used to index into the
rows of the matrix U.
"""
if self._C2_columns is None:
self._C2_columns = [0]*self.en
curr_col = 0
for sub in self.substations:
self._C2_columns[sub.nodes[0].eind] = None
for node in sub.nodes[1:]:
self._C2_columns[node.eind] = (curr_col, curr_col+1)
curr_col += 2
return self._C2_columns[node_eind]
@property
def C(self):
"""This matrix has two parts.
C1: Allows PV and Slack inds to add arbitrary amounts of reactive
power (PV) or real and reactive powers (Slack) to their power
injections.
C2: Allows a substation to move power injection freely around
the various nodes in the substation."""
if self._C is None:
rows = self.dHdv.shape[0]
num_C1_cols = len(self.powernet.pv_inds) + 2 * len(self.powernet.slack_inds)
num_C2_cols = 2 * self.num_slave_nodes
C1 = sps.lil_matrix((rows, num_C1_cols))
for sub in self.substations:
c1_cols = self.get_C1_columns(sub.index)
if sub.type == 'PQ':
assert c1_cols is None
continue
elif sub.type == 'PV':
assert len(c1_cols) == 1
C1[self.eindr2reindr[sub.nodes[0].eind+self.en], c1_cols[0]] = 1
else:
assert sub.type == 'Slack'
assert len(c1_cols) == 2
C1[self.eindr2reindr[sub.nodes[0].eind], c1_cols[0]] = 1
C1[self.eindr2reindr[sub.nodes[0].eind+self.en], c1_cols[1]] = 1
C2 = sps.lil_matrix((rows, num_C2_cols))
for sub in self.substations:
master_mag_ind = self.eindr2reindr[sub.nodes[0].eind]
master_phase_ind = self.eindr2reindr[sub.nodes[0].eind+self.en]
assert master_mag_ind == sub.index
assert master_phase_ind == sub.index + self.n
for node in sub.nodes[1:]:
curr_mag_ind = self.eindr2reindr[node.eind]
curr_phase_ind = self.eindr2reindr[node.eind+self.en]
c2_cols = self.get_C2_columns(node.eind)
assert c2_cols is not None
C2[curr_phase_ind , c2_cols[0]] = 1
C2[master_phase_ind, c2_cols[0]] = -1
C2[curr_mag_ind , c2_cols[1]] = 1
C2[master_mag_ind , c2_cols[1]] = -1
C = sps.bmat([[C1, C2]], format='csr')
self._C = C
return self._C
@property
def A(self):
""" The matrix A as described on Page 3 of the paper."""
if self._A is None:
self._A = sps.bmat([[self.dHdv, self.C],
[self.C.T, None]], format='csc').real
return self._A
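    # For reference (illustrative only, not part of the original class):
    # sps.bmat assembles this bordered system blockwise; a None entry becomes
    # an all-zero block of the matching size, e.g.
    #
    #     sps.bmat([[sps.eye(2), np.ones((2, 1))],
    #               [np.ones((1, 2)), None]]).toarray()
    #
    # gives a 3x3 array with the identity in the top-left block, the border
    # column/row of ones, and a zero lower-right block.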
@property
def Afac(self):
""" A pre-factorized A matrix to be used in linear solves."""
if self._Afac is None:
self._Afac = spsl.factorized(self.A)
return self._Afac
@property
def EAinv(self):
""" $E A^{-1}$, the projection of $A^{-1}$ onto the observable
subspace."""
if self._EAinv is None:
AT = self.A.T.tocsc()
self._EAinv = (spsl.spsolve(AT, self.E.T).T).todense(order='F')
return self._EAinv
###Enter FLiER Code
def find_topology_error(self, post_event_volts, test_type, use_filter=True):
""" Run FLiER for substation reconfigurations.
*Contingencies have the following format: A tuple.
Element 0: The index of the substation that split.
Element 1: The set of nodes of that substation that split away.
Note that single line failures can be described as the event (sub, node),
where sub is one of the substations attached to the failed line and node
is the node attached to the line that failed.
Inputs:
post_event_volts (List of doubles) - A list of the observable voltages.
Contains phase angles followed by voltage magnitudes.
test_type ("Full", "Single_Lines") - Whether this is a full contingency
search or a line failure search.
use_filter (True, False) - Whether or not to use FLiER's filter. Default
is True.
Output:
score_dict (dictionary) - A dictionary with keys that are contingencies*
and values that are t_ij scores. The "no contingency" case has key -1.
filter_score_dict (dictionary) - Same format as score_dict, but contains
filter scores tau_ij.
ts_computed (int) - The number of t_ij's computed.
"""
preEv = self.E[:,:2*self.en].dot(self.rev)
dv = post_event_volts - preEv
lendv = len(dv)
dvm = dv[lendv/2:] # Change in voltage magnitudes
dvth = dv[:lendv/2] # Change in voltage phase angles
prevm = preEv[lendv/2:]
Efingerprint = np.matrix(np.hstack([dvth*prevm, dvm])).T
if self.verbose:
print "Initializing topology error search..."
start_time = time.clock()
filter_score_dict = self.get_filter_scores(Efingerprint, test_type)
mid_time = time.clock()
score_dict = self.get_scores(Efingerprint, filter_score_dict,
use_filter=use_filter)
end_time = time.clock()
if self.verbose:
print "Topology error search completed."
print "Filter time: {0}, Fingerprint time: {1}, Total time: {2}".format(
mid_time - start_time, end_time - mid_time, end_time - start_time)
fraction_ts_computed = float(len(score_dict)-1) / len(filter_score_dict)
return score_dict, filter_score_dict, fraction_ts_computed
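    # Usage sketch (illustrative; `flier` and `v_obs` are hypothetical names
    # for a constructed FLiER_Substation instance and the observed post-event
    # voltages):
    #
    #     scores, taus, frac = flier.find_topology_error(v_obs, "Single_Lines")
    #     best = min(scores, key=scores.get)
    #
    # `best` is -1 for "no topology change", otherwise a tuple of
    # (substation index, splitting node indices); the contingency with the
    # smallest t score is FLiER's best guess.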
def get_system_matrix_extension(self, split_einds):
""" Compute the U matrix for a given contingency as described on page 3
of the paper.
U is of the form [0 F^T]^T, where F \in {0, 1}^{n \times 2} indicates
the rows of the extended system matrix that constrain the voltages of
those nodes splitting away to match the master node. This allows us
to add a slack variable that gives those nodes a different voltage.
The matrix U^T adds extra constraints to the system that ensures no
power injection is shared across open breakers in the splitting
substation.
Inputs:
split_einds (List of ints) - The extended indices of the nodes that are
splitting in this contingency.
Output:
A csc_matrix U.
"""
ind_offset = (self.dHdv.shape[0] + len(self.powernet.pv_inds) +
2*len(self.powernet.slack_inds))
U_rows = zip(*[self.get_C2_columns(sei) for sei in split_einds])
U_rows = (ind_offset + np.array(U_rows[0]), ind_offset + np.array(U_rows[1]))
num_split = len(split_einds)
row_ind = np.hstack([U_rows[0], U_rows[1]])
col_ind = np.hstack([np.zeros(num_split), np.ones(num_split)])
data = np.ones(2*num_split)
return sps.csc_matrix((data, (row_ind, col_ind)),
shape=(self.A.shape[0], 2))
def get_subspace(self, split_einds):
""" Get the filtering fingerprint subspace for this contingency.
Inputs:
split_einds (List of ints) - The extended indices of the nodes that are
splitting in this contingency.
Output:
sps.csc_matrix. The two-column matrix $\overline{E} A^{-1} U$.
"""
U = self.get_system_matrix_extension(split_einds)
subspace = self.EAinv * U
# Some subspace matrices are singular. Remove columns from those
# so that they are no longer singular. This step may not be necessary,
# as I think the lstsq functions tend to be cool with singular matrices.
while (np.linalg.svd(subspace, compute_uv=False)[-1] < 1e-9 and
subspace.shape[1] > 1):
subspace = subspace[:, :-1]
return subspace
def get_filter_scores(self, Efingerprint, test_type):
""" Compute the filter scores tau.
Run through each possible contingency and compute the filter score
tau for that contingency. Create a filter_score_dict with the results.
Inputs:
Efingerprint (np.matrix) - The voltage change fingerprint.
test_type ("Full", "Single_Lines") - The type of contingencies for
which to test.
Output:
filter_score_dict (Dictionary) - A dictionary with contingencies as keys
and tau scores as values. "No contingency" is not included in this
dictionary.
"""
filter_score_dict = dict()
filter_scores_computed = 0
for sub in self.substations:
for splitting_nodes in sub.node_iterator(test_type):
split_einds = np.array([sub.nodes[sn].eind for
sn in splitting_nodes])
key = (sub.index, tuple(splitting_nodes))
subspace = self.get_subspace(split_einds)
x = precision_lstsq(subspace, Efingerprint, ITERATIONS = 1)
Efing_sub = subspace.dot(x)
tau_score = np.linalg.norm(Efingerprint - Efing_sub)
filter_score_dict[key] = tau_score
filter_scores_computed += 1
return filter_score_dict
def get_scores(self, Efingerprint, filter_score_dict, ts_to_keep = 1,
use_filter = True):
""" Compute the actual scores t.
Inputs:
Efingerprint (np.matrix) - The voltage change fingerprint.
filter_score_dict (Dictionary) - A dictionary with contingencies as keys
and tau scores as values. "No contingency" is not included in this
dictionary.
        ts_to_keep (int) - As we iterate over possible contingencies, a
          contingency's t score is computed only if its filter (tau) score is
          below the ts_to_keep'th lowest t score computed so far. A higher
          value increases computational cost but lowers the chance of
          filtering out the correct answer. The paper only discusses the
          default value of 1.
        use_filter (True, False) - Whether or not to filter based on filter
          (tau) scores. Default is True.
        Output:
        score_dict (dictionary) - A dictionary with contingencies as keys and
          t_ij scores as values. The "no contingency" case has key -1.
        """
sort_filtersc = sorted(filter_score_dict.items(), key = lambda x : x[1])
min_ts = np.array([np.Inf]*ts_to_keep)
score_dict = dict()
score_dict[-1] = np.linalg.norm(Efingerprint) # Represents "no topology
# change."
        for i in range(len(sort_filtersc)):
key, tau = sort_filtersc[i]
splitbus, splitting_nodes = key
if (filter_score_dict[key] - np.max(min_ts) > 0) and use_filter:
continue
split_einds = np.array([self.substations[splitbus].nodes[sn].eind
for sn in splitting_nodes])
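            # Descriptive note: U extends the system matrix for this
            # contingency, FT is the transpose of its last len(self.lamed)
            # rows, and gamma solves UAinvU * gamma = FT * self.lamed.
            # If self.Afac applies A^{-1} via a precomputed factorization (an
            # assumption based on how it is used here), then UAinvU is the
            # small matrix U^T A^{-1} U and the approximate fingerprint below
            # is -E A^{-1} U gamma.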
U = self.get_system_matrix_extension(split_einds)
FT = U[-len(self.lamed):,:].T
UAinvU = U.T * self.Afac(U.todense())
try:
gamma = np.linalg.solve(UAinvU, FT * self.lamed)
except np.linalg.LinAlgError:
gamma = precision_lstsq(UAinvU, FT * self.lamed, ITERATIONS = 1)
Efing_approx = -self.EAinv * (U * gamma)
t_score = np.linalg.norm(Efingerprint - Efing_approx)
score_dict[key] = t_score
if t_score < np.max(min_ts):
min_ts[np.argmax(min_ts)] = t_score
return score_dict
| gpl-3.0 |
cgranade/qutip | qutip/bloch.py | 4 | 26736 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
__all__ = ['Bloch']
import os
from numpy import (ndarray, array, linspace, pi, outer, cos, sin, ones, size,
sqrt, real, mod, append, ceil, arange)
from qutip.qobj import Qobj
from qutip.expect import expect
from qutip.operators import sigmax, sigmay, sigmaz
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
except ImportError:
pass
class Bloch():
"""Class for plotting data on the Bloch sphere. Valid data can be
either points, vectors, or qobj objects.
Attributes
----------
axes : instance {None}
User supplied Matplotlib axes for Bloch sphere animation.
fig : instance {None}
User supplied Matplotlib Figure instance for plotting Bloch sphere.
font_color : str {'black'}
Color of font used for Bloch sphere labels.
font_size : int {20}
Size of font used for Bloch sphere labels.
frame_alpha : float {0.1}
Sets transparency of Bloch sphere frame.
frame_color : str {'gray'}
Color of sphere wireframe.
frame_width : int {1}
Width of wireframe.
point_color : list {["b","r","g","#CC6600"]}
List of colors for Bloch sphere point markers to cycle through.
i.e. By default, points 0 and 4 will both be blue ('b').
point_marker : list {["o","s","d","^"]}
List of point marker shapes to cycle through.
point_size : list {[25,32,35,45]}
List of point marker sizes. Note, not all point markers look
the same size when plotted!
sphere_alpha : float {0.2}
Transparency of Bloch sphere itself.
sphere_color : str {'#FFDDDD'}
Color of Bloch sphere.
figsize : list {[7,7]}
Figure size of Bloch sphere plot. Best to have both numbers the same;
otherwise you will have a Bloch sphere that looks like a football.
vector_color : list {["g","#CC6600","b","r"]}
List of vector colors to cycle through.
    vector_width : int {3}
Width of displayed vectors.
vector_style : str {'-|>', 'simple', 'fancy', ''}
Vector arrowhead style (from matplotlib's arrow style).
vector_mutation : int {20}
Width of vectors arrowhead.
view : list {[-60,30]}
Azimuthal and Elevation viewing angles.
xlabel : list {["$x$",""]}
List of strings corresponding to +x and -x axes labels, respectively.
    xlpos : list {[1.2,-1.2]}
Positions of +x and -x labels respectively.
ylabel : list {["$y$",""]}
List of strings corresponding to +y and -y axes labels, respectively.
ylpos : list {[1.2,-1.2]}
Positions of +y and -y labels respectively.
zlabel : list {[r'$\\left|0\\right>$',r'$\\left|1\\right>$']}
List of strings corresponding to +z and -z axes labels, respectively.
zlpos : list {[1.2,-1.2]}
Positions of +z and -z labels respectively.
"""
def __init__(self, fig=None, axes=None, view=None, figsize=None,
background=False):
# Figure and axes
self.fig = fig
self.axes = axes
# Background axes, default = False
self.background = background
# The size of the figure in inches, default = [5,5].
self.figsize = figsize if figsize else [5, 5]
        # Azimuthal and Elevation viewing angles, default = [-60,30].
self.view = view if view else [-60, 30]
# Color of Bloch sphere, default = #FFDDDD
self.sphere_color = '#FFDDDD'
# Transparency of Bloch sphere, default = 0.2
self.sphere_alpha = 0.2
# Color of wireframe, default = 'gray'
self.frame_color = 'gray'
# Width of wireframe, default = 1
self.frame_width = 1
# Transparency of wireframe, default = 0.2
self.frame_alpha = 0.2
# Labels for x-axis (in LaTex), default = ['$x$', '']
self.xlabel = ['$x$', '']
# Position of x-axis labels, default = [1.2, -1.2]
self.xlpos = [1.2, -1.2]
# Labels for y-axis (in LaTex), default = ['$y$', '']
self.ylabel = ['$y$', '']
        # Position of y-axis labels, default = [1.2, -1.2]
self.ylpos = [1.2, -1.2]
# Labels for z-axis (in LaTex),
# default = [r'$\left|0\right>$', r'$\left|1\right>$']
self.zlabel = [r'$\left|0\right>$', r'$\left|1\right>$']
# Position of z-axis labels, default = [1.2, -1.2]
self.zlpos = [1.2, -1.2]
# ---font options---
# Color of fonts, default = 'black'
self.font_color = 'black'
# Size of fonts, default = 20
self.font_size = 20
# ---vector options---
        # List of colors for Bloch vectors, default = ['g', '#CC6600', 'b', 'r']
        self.vector_color = ['g', '#CC6600', 'b', 'r']
        #: Width of Bloch vectors, default = 3
self.vector_width = 3
#: Style of Bloch vectors, default = '-|>' (or 'simple')
self.vector_style = '-|>'
#: Sets the width of the vectors arrowhead
self.vector_mutation = 20
# ---point options---
        # List of colors for Bloch point markers,
        # default = ['b', 'r', 'g', '#CC6600']
        self.point_color = ['b', 'r', 'g', '#CC6600']
        # Sizes of point markers, default = [25, 32, 35, 45]
        self.point_size = [25, 32, 35, 45]
        # Shapes of point markers, default = ['o', 's', 'd', '^']
self.point_marker = ['o', 's', 'd', '^']
# ---data lists---
# Data for point markers
self.points = []
# Data for Bloch vectors
self.vectors = []
# Data for annotations
self.annotations = []
# Number of times sphere has been saved
self.savenum = 0
# Style of points, 'm' for multiple colors, 's' for single color
self.point_style = []
# status of rendering
self._rendered = False
def set_label_convention(self, convention):
"""Set x, y and z labels according to one of conventions.
Parameters
----------
convention : string
One of the following:
- "original"
- "xyz"
- "sx sy sz"
- "01"
- "polarization jones"
- "polarization jones letters"
see also: http://en.wikipedia.org/wiki/Jones_calculus
- "polarization stokes"
see also: http://en.wikipedia.org/wiki/Stokes_parameters
"""
ketex = "$\\left.|%s\\right\\rangle$"
# \left.| is on purpose, so that every ket has the same size
if convention == "original":
self.xlabel = ['$x$', '']
self.ylabel = ['$y$', '']
self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$']
elif convention == "xyz":
self.xlabel = ['$x$', '']
self.ylabel = ['$y$', '']
self.zlabel = ['$z$', '']
elif convention == "sx sy sz":
self.xlabel = ['$s_x$', '']
self.ylabel = ['$s_y$', '']
self.zlabel = ['$s_z$', '']
elif convention == "01":
self.xlabel = ['', '']
self.ylabel = ['', '']
self.zlabel = ['$\\left|0\\right>$', '$\\left|1\\right>$']
elif convention == "polarization jones":
self.xlabel = [ketex % "\\nearrow\\hspace{-1.46}\\swarrow",
ketex % "\\nwarrow\\hspace{-1.46}\\searrow"]
self.ylabel = [ketex % "\\circlearrowleft", ketex %
"\\circlearrowright"]
self.zlabel = [ketex % "\\leftrightarrow", ketex % "\\updownarrow"]
elif convention == "polarization jones letters":
self.xlabel = [ketex % "D", ketex % "A"]
self.ylabel = [ketex % "L", ketex % "R"]
self.zlabel = [ketex % "H", ketex % "V"]
elif convention == "polarization stokes":
self.ylabel = ["$\\nearrow\\hspace{-1.46}\\swarrow$",
"$\\nwarrow\\hspace{-1.46}\\searrow$"]
self.zlabel = ["$\\circlearrowleft$", "$\\circlearrowright$"]
self.xlabel = ["$\\leftrightarrow$", "$\\updownarrow$"]
else:
raise Exception("No such convention.")
def __str__(self):
s = ""
s += "Bloch data:\n"
s += "-----------\n"
s += "Number of points: " + str(len(self.points)) + "\n"
s += "Number of vectors: " + str(len(self.vectors)) + "\n"
s += "\n"
s += "Bloch sphere properties:\n"
s += "------------------------\n"
s += "font_color: " + str(self.font_color) + "\n"
s += "font_size: " + str(self.font_size) + "\n"
s += "frame_alpha: " + str(self.frame_alpha) + "\n"
s += "frame_color: " + str(self.frame_color) + "\n"
s += "frame_width: " + str(self.frame_width) + "\n"
s += "point_color: " + str(self.point_color) + "\n"
s += "point_marker: " + str(self.point_marker) + "\n"
s += "point_size: " + str(self.point_size) + "\n"
s += "sphere_alpha: " + str(self.sphere_alpha) + "\n"
s += "sphere_color: " + str(self.sphere_color) + "\n"
s += "figsize: " + str(self.figsize) + "\n"
s += "vector_color: " + str(self.vector_color) + "\n"
s += "vector_width: " + str(self.vector_width) + "\n"
s += "vector_style: " + str(self.vector_style) + "\n"
s += "vector_mutation: " + str(self.vector_mutation) + "\n"
s += "view: " + str(self.view) + "\n"
s += "xlabel: " + str(self.xlabel) + "\n"
s += "xlpos: " + str(self.xlpos) + "\n"
s += "ylabel: " + str(self.ylabel) + "\n"
s += "ylpos: " + str(self.ylpos) + "\n"
s += "zlabel: " + str(self.zlabel) + "\n"
s += "zlpos: " + str(self.zlpos) + "\n"
return s
def _repr_png_(self):
from IPython.core.pylabtools import print_figure
self.render()
fig_data = print_figure(self.fig, 'png')
plt.close(self.fig)
return fig_data
def _repr_svg_(self):
from IPython.core.pylabtools import print_figure
self.render()
fig_data = print_figure(self.fig, 'svg').decode('utf-8')
plt.close(self.fig)
return fig_data
def clear(self):
"""Resets Bloch sphere data sets to empty.
"""
self.points = []
self.vectors = []
self.point_style = []
self.annotations = []
def add_points(self, points, meth='s'):
"""Add a list of data points to bloch sphere.
Parameters
----------
points : array/list
Collection of data points.
meth : str {'s', 'm', 'l'}
Type of points to plot, use 'm' for multicolored, 'l' for points
connected with a line.
"""
if not isinstance(points[0], (list, ndarray)):
points = [[points[0]], [points[1]], [points[2]]]
points = array(points)
if meth == 's':
if len(points[0]) == 1:
pnts = array([[points[0][0]], [points[1][0]], [points[2][0]]])
pnts = append(pnts, points, axis=1)
else:
pnts = points
self.points.append(pnts)
self.point_style.append('s')
elif meth == 'l':
self.points.append(points)
self.point_style.append('l')
else:
self.points.append(points)
self.point_style.append('m')
def add_states(self, state, kind='vector'):
"""Add a state vector Qobj to Bloch sphere.
Parameters
----------
state : qobj
Input state vector.
kind : str {'vector','point'}
Type of object to plot.
"""
if isinstance(state, Qobj):
state = [state]
for st in state:
vec = [expect(sigmax(), st),
expect(sigmay(), st),
expect(sigmaz(), st)]
if kind == 'vector':
self.add_vectors(vec)
elif kind == 'point':
self.add_points(vec)
def add_vectors(self, vectors):
"""Add a list of vectors to Bloch sphere.
Parameters
----------
vectors : array_like
Array with vectors of unit length or smaller.
"""
if isinstance(vectors[0], (list, ndarray)):
for vec in vectors:
self.vectors.append(vec)
else:
self.vectors.append(vectors)
def add_annotation(self, state_or_vector, text, **kwargs):
"""Add a text or LaTeX annotation to Bloch sphere,
parametrized by a qubit state or a vector.
Parameters
----------
state_or_vector : Qobj/array/list/tuple
            Position for the annotation.
Qobj of a qubit or a vector of 3 elements.
text : str/unicode
Annotation text.
You can use LaTeX, but remember to use raw string
e.g. r"$\\langle x \\rangle$"
or escape backslashes
e.g. "$\\\\langle x \\\\rangle$".
**kwargs :
Options as for mplot3d.axes3d.text, including:
fontsize, color, horizontalalignment, verticalalignment.
"""
if isinstance(state_or_vector, Qobj):
vec = [expect(sigmax(), state_or_vector),
expect(sigmay(), state_or_vector),
expect(sigmaz(), state_or_vector)]
elif isinstance(state_or_vector, (list, ndarray, tuple)) \
and len(state_or_vector) == 3:
vec = state_or_vector
else:
raise Exception("Position needs to be specified by a qubit " +
"state or a 3D vector.")
self.annotations.append({'position': vec,
'text': text,
'opts': kwargs})
def make_sphere(self):
"""
Plots Bloch sphere and data sets.
"""
self.render(self.fig, self.axes)
def render(self, fig=None, axes=None):
"""
        Render the Bloch sphere and its data sets on the given figure and axes.
"""
if self._rendered:
self.axes.clear()
self._rendered = True
# Figure instance for Bloch sphere plot
if not fig:
self.fig = plt.figure(figsize=self.figsize)
if not axes:
self.axes = Axes3D(self.fig, azim=self.view[0], elev=self.view[1])
if self.background:
self.axes.clear()
self.axes.set_xlim3d(-1.3, 1.3)
self.axes.set_ylim3d(-1.3, 1.3)
self.axes.set_zlim3d(-1.3, 1.3)
else:
self.plot_axes()
self.axes.set_axis_off()
self.axes.set_xlim3d(-0.7, 0.7)
self.axes.set_ylim3d(-0.7, 0.7)
self.axes.set_zlim3d(-0.7, 0.7)
self.axes.grid(False)
self.plot_back()
self.plot_points()
self.plot_vectors()
self.plot_front()
self.plot_axes_labels()
self.plot_annotations()
def plot_back(self):
# back half of sphere
u = linspace(0, pi, 25)
v = linspace(0, pi, 25)
x = outer(cos(u), sin(v))
y = outer(sin(u), sin(v))
z = outer(ones(size(u)), cos(v))
self.axes.plot_surface(x, y, z, rstride=2, cstride=2,
color=self.sphere_color, linewidth=0,
alpha=self.sphere_alpha)
# wireframe
self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,
color=self.frame_color,
alpha=self.frame_alpha)
# equator
self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='z',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='x',
lw=self.frame_width, color=self.frame_color)
def plot_front(self):
# front half of sphere
u = linspace(-pi, 0, 25)
v = linspace(0, pi, 25)
x = outer(cos(u), sin(v))
y = outer(sin(u), sin(v))
z = outer(ones(size(u)), cos(v))
self.axes.plot_surface(x, y, z, rstride=2, cstride=2,
color=self.sphere_color, linewidth=0,
alpha=self.sphere_alpha)
# wireframe
self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,
color=self.frame_color,
alpha=self.frame_alpha)
# equator
self.axes.plot(1.0 * cos(u), 1.0 * sin(u),
zs=0, zdir='z', lw=self.frame_width,
color=self.frame_color)
self.axes.plot(1.0 * cos(u), 1.0 * sin(u),
zs=0, zdir='x', lw=self.frame_width,
color=self.frame_color)
def plot_axes(self):
# axes
span = linspace(-1.0, 1.0, 2)
self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z',
lw=self.frame_width, color=self.frame_color)
def plot_axes_labels(self):
# axes labels
opts = {'fontsize': self.font_size,
'color': self.font_color,
'horizontalalignment': 'center',
'verticalalignment': 'center'}
self.axes.text(0, -self.xlpos[0], 0, self.xlabel[0], **opts)
self.axes.text(0, -self.xlpos[1], 0, self.xlabel[1], **opts)
self.axes.text(self.ylpos[0], 0, 0, self.ylabel[0], **opts)
self.axes.text(self.ylpos[1], 0, 0, self.ylabel[1], **opts)
self.axes.text(0, 0, self.zlpos[0], self.zlabel[0], **opts)
self.axes.text(0, 0, self.zlpos[1], self.zlabel[1], **opts)
for a in (self.axes.w_xaxis.get_ticklines() +
self.axes.w_xaxis.get_ticklabels()):
a.set_visible(False)
for a in (self.axes.w_yaxis.get_ticklines() +
self.axes.w_yaxis.get_ticklabels()):
a.set_visible(False)
for a in (self.axes.w_zaxis.get_ticklines() +
self.axes.w_zaxis.get_ticklabels()):
a.set_visible(False)
def plot_vectors(self):
# -X and Y data are switched for plotting purposes
for k in range(len(self.vectors)):
xs3d = self.vectors[k][1] * array([0, 1])
ys3d = -self.vectors[k][0] * array([0, 1])
zs3d = self.vectors[k][2] * array([0, 1])
color = self.vector_color[mod(k, len(self.vector_color))]
if self.vector_style == '':
# simple line style
self.axes.plot(xs3d, ys3d, zs3d,
zs=0, zdir='z', label='Z',
lw=self.vector_width, color=color)
else:
# decorated style, with arrow heads
a = Arrow3D(xs3d, ys3d, zs3d,
mutation_scale=self.vector_mutation,
lw=self.vector_width,
arrowstyle=self.vector_style,
color=color)
self.axes.add_artist(a)
def plot_points(self):
# -X and Y data are switched for plotting purposes
for k in range(len(self.points)):
num = len(self.points[k][0])
dist = [sqrt(self.points[k][0][j] ** 2 +
self.points[k][1][j] ** 2 +
self.points[k][2][j] ** 2) for j in range(num)]
if any(abs(dist - dist[0]) / dist[0] > 1e-12):
# combine arrays so that they can be sorted together
zipped = list(zip(dist, range(num)))
zipped.sort() # sort rates from lowest to highest
dist, indperm = zip(*zipped)
indperm = array(indperm)
else:
indperm = arange(num)
if self.point_style[k] == 's':
self.axes.scatter(
real(self.points[k][1][indperm]),
- real(self.points[k][0][indperm]),
real(self.points[k][2][indperm]),
s=self.point_size[mod(k, len(self.point_size))],
alpha=1,
edgecolor='none',
zdir='z',
color=self.point_color[mod(k, len(self.point_color))],
marker=self.point_marker[mod(k, len(self.point_marker))])
elif self.point_style[k] == 'm':
pnt_colors = array(self.point_color *
int(ceil(num / float(len(self.point_color)))))
pnt_colors = pnt_colors[0:num]
pnt_colors = list(pnt_colors[indperm])
marker = self.point_marker[mod(k, len(self.point_marker))]
s = self.point_size[mod(k, len(self.point_size))]
self.axes.scatter(real(self.points[k][1][indperm]),
-real(self.points[k][0][indperm]),
real(self.points[k][2][indperm]),
s=s, alpha=1, edgecolor='none',
zdir='z', color=pnt_colors,
marker=marker)
elif self.point_style[k] == 'l':
color = self.point_color[mod(k, len(self.point_color))]
self.axes.plot(real(self.points[k][1]),
-real(self.points[k][0]),
real(self.points[k][2]),
alpha=0.75, zdir='z',
color=color)
def plot_annotations(self):
# -X and Y data are switched for plotting purposes
for annotation in self.annotations:
vec = annotation['position']
opts = {'fontsize': self.font_size,
'color': self.font_color,
'horizontalalignment': 'center',
'verticalalignment': 'center'}
opts.update(annotation['opts'])
self.axes.text(vec[1], -vec[0], vec[2],
annotation['text'], **opts)
def show(self):
"""
Display Bloch sphere and corresponding data sets.
"""
self.render(self.fig, self.axes)
if self.fig:
plt.show(self.fig)
def save(self, name=None, format='png', dirc=None):
"""Saves Bloch sphere to file of type ``format`` in directory ``dirc``.
Parameters
----------
name : str
            Name of saved image. Must include path and format as well,
            e.g. '/Users/Paul/Desktop/bloch.png'.
This overrides the 'format' and 'dirc' arguments.
format : str
Format of output image.
dirc : str
Directory for output images. Defaults to current working directory.
Returns
-------
File containing plot of Bloch sphere.
"""
self.render(self.fig, self.axes)
if dirc:
if not os.path.isdir(os.getcwd() + "/" + str(dirc)):
os.makedirs(os.getcwd() + "/" + str(dirc))
if name is None:
if dirc:
self.fig.savefig(os.getcwd() + "/" + str(dirc) + '/bloch_' +
str(self.savenum) + '.' + format)
else:
self.fig.savefig(os.getcwd() + '/bloch_' + str(self.savenum) +
'.' + format)
else:
self.fig.savefig(name)
self.savenum += 1
if self.fig:
plt.close(self.fig)
def _hide_tick_lines_and_labels(axis):
'''
Set visible property of ticklines and ticklabels of an axis to False
'''
for a in axis.get_ticklines() + axis.get_ticklabels():
a.set_visible(False)
| bsd-3-clause |