repo_name | path | copies | size | content | license
---|---|---|---|---|---|
btabibian/scikit-learn | sklearn/tree/tree.py | 11 | 50091 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
import warnings
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_decrease,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
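# Build a decision tree from the training set (X, y); this shared
# implementation is used by all of the public tree estimators below.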
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity; indexing with
# [:, np.newaxis] would not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the integer %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the float %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either None "
"or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
if self.min_impurity_split is not None:
warnings.warn("The min_impurity_split parameter is deprecated and"
" will be removed in version 0.21. "
"Use the min_impurity_decrease parameter instead.",
DeprecationWarning)
min_impurity_split = self.min_impurity_split
else:
min_impurity_split = 1e-7
if min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
if self.min_impurity_decrease < 0.:
raise ValueError("min_impurity_decrease must be greater than "
"or equal to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
self.min_impurity_decrease,
min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_decrease,
min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predicted values.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
.. versionadded:: 0.18
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the sample goes through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'tree_')
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
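For instance, if ``y`` contains 75 samples of class 0 and 25 samples of
class 1, "balanced" assigns weight ``100 / (2 * 75)`` (about 0.67) to
class 0 and ``100 / (2 * 25) = 2.0`` to class 1.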
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
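As an illustration, for ``N = 100`` unweighted samples, splitting a node
holding ``N_t = 40`` samples with impurity 0.5 into children of 30 and 10
samples with impurities 0.3 and 0.0 gives a weighted decrease of
``40/100 * (0.5 - 30/40 * 0.3 - 10/40 * 0.0) = 0.11``.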
.. versionadded:: 0.19
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels) as integers or strings.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeClassifier, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float,
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. deprecated:: 0.19
``min_impurity_split`` has been deprecated in favor of
``min_impurity_decrease`` in 0.19 and will be removed in 0.21.
Use ``min_impurity_decrease`` instead.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data and
``max_features=n_features``, if the improvement of the criterion is
identical for several splits enumerated during the search of the best
split. To obtain a deterministic behaviour during fitting,
``random_state`` has to be fixed.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : boolean, (default=True)
Allow to bypass several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeRegressor, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_decrease=0.,
min_impurity_split=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=min_impurity_decrease,
min_impurity_split=min_impurity_split,
random_state=random_state)
| bsd-3-clause |
numenta/htmresearch | projects/sequence_prediction/reberGrammar/reberSequencePrediction_TM.py | 7 | 7431 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
from htmresearch.support.reberGrammar import *
from nupic.encoders import SDRCategoryEncoder
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.ion()
rcParams.update({'figure.autolayout': True})
from htmresearch_core.experimental import ApicalTiebreakPairMemory
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.support.apical_tm_pair_monitor_mixin import (
ApicalTMPairMonitorMixin)
class MonitoredApicalTiebreakPairMemory(
ApicalTMPairMonitorMixin, ApicalTiebreakPairMemory):
pass
maxLength = 20
numTrainSequence = 50
numTestSequence = 50
rptPerSeq = 5
n = 2048
w = 40
enc = SDRCategoryEncoder(n, w, categoryList)
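# Encode each Reber grammar symbol once and cache the set of active columns
# of its SDR for later overlap computations.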
sdr_dict = dict()
for i in xrange(len(categoryList)):
sdr = enc.encode(categoryList[i])
activeCols = set(np.where(sdr)[0])
sdr_dict[categoryList[i]] = activeCols
def initializeTM():
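# Create a temporal memory instance (wrapped with the monitoring mixin)
# using the default parameters defined below.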
DEFAULT_TEMPORAL_MEMORY_PARAMS = {"columnDimensions": (2048,),
"cellsPerColumn": 32,
"activationThreshold": 15,
"initialPermanence": 0.41,
"connectedPermanence": 0.5,
"minThreshold": 10,
"maxNewSynapseCount": 20,
"permanenceIncrement": 0.10,
"permanenceDecrement": 0.02,
"seed": 42}
params = dict(DEFAULT_TEMPORAL_MEMORY_PARAMS)
# params.update(tmOverrides or {})
# params["seed"] = seed
tm = MonitoredApicalTiebreakPairMemory(mmName="TM", **params)
return tm
def calculateOverlapWithSDRdict(activeCols, sdr_dict):
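# For each category, count how many active columns are shared between
# `activeCols` and that category's cached SDR.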
overlaps = []
for i in xrange(len(categoryList)):
overlaps.append(len(sdr_dict[categoryList[i]] & activeCols))
return overlaps
def findMaxOverlap(overlaps):
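# Return the category whose SDR overlaps most with the current activity.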
maxOverlapLoc = np.argmax(overlaps)
return categoryList[maxOverlapLoc]
def columnForCells(tm, cells):
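# Map a collection of cell indices to the set of columns containing them.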
columns = []
for i in xrange(len(cells)):
columns.append(tm.columnForCell(cells[i]))
return set(columns)
def feedSequenceToTM(tm, in_seq, tmLearn):
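# Feed one symbol at a time into the temporal memory, with learning
# enabled or disabled according to `tmLearn`.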
for i in xrange(len(in_seq)):
sequenceLabel = in_seq[i]
sdr_in = sdr_dict[sequenceLabel]
tm.compute(sdr_in,
learn=tmLearn,
sequenceLabel=sequenceLabel)
def trainTMonReberSeq(tm, numTrainSequence, seedSeq=1):
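# Train the TM on randomly generated Reber grammar sequences, presenting
# each sequence `rptPerSeq` times with a reset before every repetition.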
np.random.seed(seedSeq)
tmLearn = 1
for _ in xrange(numTrainSequence):
[in_seq, out_seq] = generateSequences(maxLength)
print("train seq", _, in_seq)
for _ in xrange(rptPerSeq):
tm.reset()
feedSequenceToTM(tm, in_seq, tmLearn)
return tm
def testTMOnReberSeq(tm, numTestSequence, seedSeq=2):
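# Evaluate the trained TM without learning: decode the predicted cells back
# to grammar symbols via SDR overlap and accumulate hit/miss/false-positive
# statistics over all test sequences.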
np.random.seed(seedSeq)
tmLearn = 0
outcomeAll = []
numStep = 0
numOutcome = 0
numPred = 0
numMiss = 0
numFP = 0
for _ in xrange(numTestSequence):
tm.reset()
tm.mmClearHistory()
[in_seq, out_seq] = generateSequences(maxLength)
print("test seq", _, in_seq)
feedSequenceToTM(tm, in_seq, tmLearn)
activeColsTrace = tm._mmTraces['activeColumns'].data
predictionTrace = tm._mmTraces['predictiveCells'].data
for i in xrange(len(activeColsTrace)-1):
# overlap = calculateOverlapWithSDRdict(activeColsTrace[i], sdr_dict)
# activation = findMaxOverlap(overlap)
predictedColumns = columnForCells(tm, list(predictionTrace[i]))
overlap = calculateOverlapWithSDRdict(predictedColumns, sdr_dict)
prediction = findMaxOverlap(overlap)
outcome = checkPrediction(out_seq[i], prediction)
outcomeAll.append(outcome)
prediction = getMatchingElements(overlap, 20)
(missN, fpN) = checkPrediction2(out_seq[i], prediction)
numPred += len(prediction)
numOutcome += len(out_seq[i])
numMiss += missN
numFP += fpN
numStep += 1
print("step: ", i, "current input", in_seq[i],
" possible next elements: ", out_seq[i], " prediction: ", prediction,
" outcome: ", outcome, "Miss: ", missN, "FP: ", fpN)
correctRate = sum(outcomeAll)/float(len(outcomeAll))
missRate = float(numMiss)/float(numStep * 7)
fpRate = float(numFP)/float(numStep * 7)
errRate = float(numMiss + numFP)/float(numStep * 7)
print("Correct Rate (Best Prediction): ", correctRate)
print("Error Rate: ", errRate)
print("Miss Rate: ", missRate)
print("False Positive Rate: ", fpRate)
return correctRate, missRate, fpRate
def runSingleExperiment(numTrainSequence, train_seed=1, test_seed=2):
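# Train a fresh TM on `numTrainSequence` sequences and evaluate it once.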
tm = initializeTM()
trainTMonReberSeq(tm, numTrainSequence, seedSeq=train_seed)
(correctRate, missRate, fpRate) = testTMOnReberSeq(tm, numTestSequence, seedSeq=test_seed)
def runExperiment():
"""
Experiment 1: Calculate error rate as a function of training sequence numbers
:return:
"""
trainSeqN = [5, 10, 20, 50, 100, 200]
rptPerCondition = 5
correctRateAll = np.zeros((len(trainSeqN), rptPerCondition))
missRateAll = np.zeros((len(trainSeqN), rptPerCondition))
fpRateAll = np.zeros((len(trainSeqN), rptPerCondition))
for i in xrange(len(trainSeqN)):
for rpt in xrange(rptPerCondition):
tm = initializeTM()
train_seed = 1
numTrainSequence = trainSeqN[i]
trainTMonReberSeq(tm, numTrainSequence, seedSeq=train_seed)
(correctRate, missRate, fpRate) = testTMOnReberSeq(tm, numTestSequence, seedSeq=train_seed+rpt)
correctRateAll[i, rpt] = correctRate
missRateAll[i, rpt] = missRate
fpRateAll[i, rpt] = fpRate
np.savez('result/reberSequenceTM.npz',
correctRateAll=correctRateAll, missRateAll=missRateAll,
fpRateAll=fpRateAll, trainSeqN=trainSeqN)
plt.figure()
plt.subplot(2,2,1)
plt.semilogx(trainSeqN, 100*np.mean(correctRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate - Best Match (%)')
plt.subplot(2,2,2)
plt.semilogx(trainSeqN, 100*np.mean(missRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(trainSeqN, 100*np.mean(fpRateAll,1),'-*')
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_TMperformance.pdf')
if __name__ == "__main__":
runExperiment()
# uncomment to run a single experiment
# runSingleExperiment(5)
| agpl-3.0 |
dwf/numpy | numpy/lib/polynomial.py | 12 | 37449 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack, dot
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError("input must be 1d or square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError :
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (M,) or (M, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K) : present only if `full` = False and `cov`=True
The covariance matrix of the polynomial coefficient estimates. The diagonal
of this matrix are the variance estimates for each coefficient. If y is a 2-d
array, then the covariance matrix for the `k`-th data set are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0] :
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
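    # Undo the column scaling so the coefficients refer to the original,
    # unscaled powers of x.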
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full :
return c, resids, rank, s, rcond
elif cov :
Vbase = inv(dot(lhs.T,lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
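        # i.e. the error variance is estimated as resids / (n - order - 2)
        # rather than the textbook resids / (n - order).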
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:,NX.newaxis] * fac
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
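    # Horner's scheme: (((p[0]*x + p[1])*x + p[2])*x + ...) + p[-1]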
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
    >>> print(p1)
1 x + 2
    >>> print(p2)
2
9 x + 5 x + 4
    >>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
    >>> print(p1)
2
1 x + 2 x + 3
    >>> print(p2)
2
9 x + 5 x + 1
    >>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
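    # Synthetic long division: each pass extracts one quotient coefficient
    # and subtracts the shifted, scaled divisor from the running remainder.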
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
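    """Helper for `poly1d.__str__`: rewrite each ``**<power>`` marker in
    `astr` so that the exponent is printed on the line above its term,
    wrapping the resulting two-line layout at `wrap` columns.
    """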
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
    >>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
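        Examples
        --------
        For example (mirroring the `polyint` examples):
        >>> p = np.poly1d([3, 2, 1])
        >>> P = p.integ()
        >>> P.deriv() == p
        True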
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
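        Examples
        --------
        For example (mirroring the `polyder` examples):
        >>> p = np.poly1d([1, 2, 3])
        >>> p.deriv()
        poly1d([2, 2])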
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 39224 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(X, y, name, criterion):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = est.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0,
criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, X, y, name, criterion
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
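        # Theoretical MDI importance of feature X_m: accumulate, over every
        # conditioning subset B of the other features, the weighted
        # conditional mutual information between X_m and y (cf. Louppe
        # et al., 2013, cited above).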
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
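    # (Sanity check: 1/3 + 4 * 1/6 = 1, and the 0.20 threshold below
    # cleanly separates 1/6 ~ 0.17 from 1/3 ~ 0.33.)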
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
    # Test that leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/plot_multioutput_face_completion.py | 330 | 3019 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
jaeilepp/mne-python | mne/decoding/csp.py | 1 | 37653 | # -*- coding: utf-8 -*-
# Authors: Romain Trachel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Alexandre Barachant <[email protected]>
# Clemens Brunner <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import copy as cp
import numpy as np
from scipy import linalg
from .mixin import TransformerMixin
from .base import BaseEstimator
from ..cov import _regularized_covariance
from ..utils import warn
class CSP(TransformerMixin, BaseEstimator):
u"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This object can be used as a supervised decomposition to estimate
spatial filters for feature extraction in a 2 class decoding problem.
    CSP in the context of EEG was first described in [1]_; a comprehensive
    tutorial on CSP can be found in [2]_. Multiclass solving is implemented
    from [3]_.
Parameters
----------
n_components : int, defaults to 4
The number of components to decompose M/EEG signals.
This number should be set by cross-validation.
reg : float | str | None, defaults to None
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
log : None | bool, defaults to None
If transform_into == 'average_power' and log is None or True, then
applies a log transform to standardize the features, else the features
are z-scored. If transform_into == 'csp_space', then log must be None.
cov_est : 'concat' | 'epoch', defaults to 'concat'
If 'concat', covariance matrices are estimated on concatenated epochs
for each class.
If 'epoch', covariance matrices are estimated on each epoch separately
and then averaged over each class.
transform_into : {'average_power', 'csp_space'}
If 'average_power' then self.transform will return the average power of
each spatial filter. If 'csp_space' self.transform will return the data
in CSP space. Defaults to 'average_power'.
norm_trace : bool
Normalize class covariance by its trace. Defaults to True. Trace
normalization is a step of the original CSP algorithm [1]_ to eliminate
magnitude variations in the EEG between individuals. It is not applied
in more recent work [2]_, [3]_ and can have a negative impact on
patterns ordering.
Attributes
----------
``filters_`` : ndarray, shape (n_components, n_channels)
If fit, the CSP components used to decompose the data, else None.
``patterns_`` : ndarray, shape (n_components, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
``mean_`` : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
``std_`` : ndarray, shape (n_components,)
If fit, the std squared power for each component.
See Also
--------
mne.preprocessing.Xdawn, SPoC
References
----------
.. [1] Zoltan J. Koles, Michael S. Lazar, Steven Z. Zhou. Spatial Patterns
Underlying Population Differences in the Background EEG. Brain
Topography 2(4), 275-284, 1990.
.. [2] Benjamin Blankertz, Ryota Tomioka, Steven Lemm, Motoaki Kawanabe,
Klaus-Robert Müller. Optimizing Spatial Filters for Robust EEG
Single-Trial Analysis. IEEE Signal Processing Magazine 25(1), 41-56,
2008.
.. [3] Grosse-Wentrup, Moritz, and Martin Buss. Multiclass common spatial
patterns and information theoretic feature extraction. IEEE
Transactions on Biomedical Engineering, Vol 55, no. 8, 2008.
"""
def __init__(self, n_components=4, reg=None, log=None, cov_est="concat",
transform_into='average_power', norm_trace=None):
"""Init of CSP."""
# Init default CSP
if not isinstance(n_components, int):
raise ValueError('n_components must be an integer.')
self.n_components = n_components
# Init default regularization
if (
(reg is not None) and
(reg not in ['oas', 'ledoit_wolf']) and
((not isinstance(reg, (float, int))) or
(not ((reg <= 1.) and (reg >= 0.))))
):
raise ValueError('reg must be None, "oas", "ledoit_wolf" or a '
'float in between 0. and 1.')
self.reg = reg
# Init default cov_est
if not (cov_est == "concat" or cov_est == "epoch"):
raise ValueError("unknown covariance estimation method")
self.cov_est = cov_est
# Init default transform_into
if transform_into not in ('average_power', 'csp_space'):
raise ValueError('transform_into must be "average_power" or '
'"csp_space".')
self.transform_into = transform_into
# Init default log
if transform_into == 'average_power':
if log is not None and not isinstance(log, bool):
raise ValueError('log must be a boolean if transform_into == '
'"average_power".')
else:
if log is not None:
raise ValueError('log must be a None if transform_into == '
'"csp_space".')
self.log = log
if norm_trace is None:
norm_trace = True
warn("new_param defaults to True in 0.15, but will change to False"
" in 0.16. Set it explicitly to avoid this warning.",
DeprecationWarning)
if not isinstance(norm_trace, bool):
raise ValueError('norm_trace must be a bool.')
self.norm_trace = norm_trace
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
if y is not None:
if len(X) != len(y) or len(y) < 1:
raise ValueError('X and y must have the same length.')
if X.ndim < 3:
raise ValueError('X must have at least 3 dimensions.')
def fit(self, X, y):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the CSP.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)."
% type(X))
self._check_Xy(X, y)
n_channels = X.shape[1]
self._classes = np.unique(y)
n_classes = len(self._classes)
if n_classes < 2:
raise ValueError("n_classes must be >= 2.")
covs = np.zeros((n_classes, n_channels, n_channels))
sample_weights = list()
for class_idx, this_class in enumerate(self._classes):
if self.cov_est == "concat": # concatenate epochs
class_ = np.transpose(X[y == this_class], [1, 0, 2])
class_ = class_.reshape(n_channels, -1)
cov = _regularized_covariance(class_, reg=self.reg)
weight = sum(y == this_class)
elif self.cov_est == "epoch":
class_ = X[y == this_class]
cov = np.zeros((n_channels, n_channels))
for this_X in class_:
cov += _regularized_covariance(this_X, reg=self.reg)
cov /= len(class_)
weight = len(class_)
covs[class_idx] = cov
if self.norm_trace:
# Append covariance matrix and weight. Prior to version 0.15,
# trace normalization was applied, but was breaking results for
                # some usecases by changing the apparent ranking of patterns.
                # Trace normalization of the covariance matrix was removed
                # without significant effect on patterns or performances.
                # If the user is interested in this feature, we suggest trace
# normalization of the epochs prior to the CSP.
covs[class_idx] /= np.trace(cov)
sample_weights.append(weight)
if n_classes == 2:
eigen_values, eigen_vectors = linalg.eigh(covs[0], covs.sum(0))
# sort eigenvectors
ix = np.argsort(np.abs(eigen_values - 0.5))[::-1]
else:
# The multiclass case is adapted from
# http://github.com/alexandrebarachant/pyRiemann
eigen_vectors, D = _ajd_pham(covs)
# Here we apply an euclidean mean. See pyRiemann for other metrics
mean_cov = np.average(covs, axis=0, weights=sample_weights)
eigen_vectors = eigen_vectors.T
# normalize
for ii in range(eigen_vectors.shape[1]):
tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov),
eigen_vectors[:, ii])
eigen_vectors[:, ii] /= np.sqrt(tmp)
# class probability
class_probas = [np.mean(y == _class) for _class in self._classes]
# mutual information
mutual_info = []
for jj in range(eigen_vectors.shape[1]):
aa, bb = 0, 0
for (cov, prob) in zip(covs, class_probas):
tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov),
eigen_vectors[:, jj])
aa += prob * np.log(np.sqrt(tmp))
bb += prob * (tmp ** 2 - 1)
mi = - (aa + (3.0 / 16) * (bb ** 2))
mutual_info.append(mi)
ix = np.argsort(mutual_info)[::-1]
# sort eigenvectors
eigen_vectors = eigen_vectors[:, ix]
self.filters_ = eigen_vectors.T
self.patterns_ = linalg.pinv(eigen_vectors)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
X = (X ** 2).mean(axis=2)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, X):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
            If self.transform_into == 'average_power' then returns the power of
            CSP features averaged over time and shape (n_epochs, n_sources).
            If self.transform_into == 'csp_space' then returns the data in CSP
            space and shape is (n_epochs, n_sources, n_times).
"""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)." % type(X))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
if self.transform_into == 'average_power':
X = (X ** 2).mean(axis=2)
log = True if self.log is None else self.log
if log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
def plot_patterns(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic patterns of components.
The patterns explain how the measured data was generated from the
neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used for fitting.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
            of SSP projection vectors will be shown.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. Defaults to 6.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
        # then call plot_topomap
return patterns.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
def plot_filters(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic filters of components.
The filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used for fitting.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
            of SSP projection vectors will be shown.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be
drawn. When an integer, matplotlib ticker locator is used to find
suitable values for the contour thresholds (may sometimes be
inaccurate, use array for accuracy). If an array, the values
represent the levels for the contours. Defaults to 6.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
        # then call plot_topomap
return filters.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
def _ajd_pham(X, eps=1e-6, max_iter=15):
"""Approximate joint diagonalization based on Pham's algorithm.
    This is a direct implementation of Pham's AJD algorithm [1]_.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_channels)
A set of covariance matrices to diagonalize.
eps : float, defaults to 1e-6
        The tolerance for the stopping criterion.
    max_iter : int, defaults to 15
        The maximum number of iterations to reach convergence.
Returns
-------
V : ndarray, shape (n_channels, n_channels)
The diagonalizer.
D : ndarray, shape (n_epochs, n_channels, n_channels)
The set of quasi diagonal matrices.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive
definite Hermitian matrices." SIAM Journal on Matrix Analysis and
Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(max_iter):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V, D
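# A hypothetical sanity check for the diagonalizer above (kept as a comment so
# that nothing runs at import time): the second return value ``D`` holds the
# quasi-diagonalized matrices, so for symmetric positive-definite inputs
# ``covs`` of shape (n_epochs, n_channels, n_channels) one would expect
#
#     V, D = _ajd_pham(covs)
#     off_diag = D - np.array([np.diag(np.diag(d)) for d in D])
#     print(np.abs(off_diag).max())   # small relative to the diagonal entries
#
# to report small off-diagonal residuals once the stopping criterion is met.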
class SPoC(CSP):
"""Implementation of the SPoC spatial filtering.
    Source Power Comodulation (SPoC) [1]_ allows one to extract spatial
    filters and patterns by using a target (continuous) variable in the
    decomposition process in order to give preference to components whose
    power correlates with the target variable.
SPoC can be seen as an extension of the CSP driven by a continuous
variable rather than a discrete variable. Typical applications include
extraction of motor patterns using EMG power or audio patterns using sound
envelope.
Parameters
----------
n_components : int
The number of components to decompose M/EEG signals.
reg : float | str | None, defaults to None
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
log : None | bool, defaults to None
If transform_into == 'average_power' and log is None or True, then
applies a log transform to standardize the features, else the features
are z-scored. If transform_into == 'csp_space', then log must be None.
transform_into : {'average_power', 'csp_space'}
If 'average_power' then self.transform will return the average power of
each spatial filter. If 'csp_space' self.transform will return the data
in CSP space. Defaults to 'average_power'.
Attributes
----------
``filters_`` : ndarray, shape (n_components, n_channels)
If fit, the SPoC spatial filters, else None.
``patterns_`` : ndarray, shape (n_components, n_channels)
If fit, the SPoC spatial patterns, else None.
``mean_`` : ndarray, shape (n_components,)
If fit, the mean squared power for each component.
``std_`` : ndarray, shape (n_components,)
If fit, the std squared power for each component.
See Also
--------
mne.preprocessing.Xdawn, CSP
References
----------
.. [1] Dahne, S., Meinecke, F. C., Haufe, S., Hohne, J., Tangermann, M.,
Muller, K. R., & Nikulin, V. V. (2014). SPoC: a novel framework for
relating the amplitude of neuronal oscillations to behaviorally
relevant parameters. NeuroImage, 86, 111-122.
"""
def __init__(self, n_components=4, reg=None, log=None,
transform_into='average_power'):
"""Init of SPoC."""
super(SPoC, self).__init__(n_components=n_components, reg=reg, log=log,
cov_est="epoch", norm_trace=False,
transform_into=transform_into)
        # Covariance estimation has to be done on the single epoch level,
# unlike CSP where covariance estimation can also be achieved through
# concatenation of all epochs from the same class.
delattr(self, 'cov_est')
delattr(self, 'norm_trace')
def fit(self, X, y):
"""Estimate the SPoC decomposition on epochs.
Parameters
----------
X : ndarray, shape (n_epochs, n_channels, n_times)
The data on which to estimate the SPoC.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of SPoC
Returns the modified instance.
"""
if not isinstance(X, np.ndarray):
raise ValueError("X should be of type ndarray (got %s)."
% type(X))
self._check_Xy(X, y)
if len(np.unique(y)) < 2:
raise ValueError("y must have at least two distinct values.")
        # The following code is directly copied from pyRiemann
# Normalize target variable
target = y.astype(np.float64)
target -= target.mean()
target /= target.std()
n_epochs, n_channels = X.shape[:2]
# Estimate single trial covariance
covs = np.empty((n_epochs, n_channels, n_channels))
for ii, epoch in enumerate(X):
covs[ii] = _regularized_covariance(epoch, reg=self.reg)
C = covs.mean(0)
Cz = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0)
# solve eigenvalue decomposition
evals, evecs = linalg.eigh(Cz, C)
evals = evals.real
evecs = evecs.real
# sort vectors
ix = np.argsort(np.abs(evals))[::-1]
# sort eigenvectors
evecs = evecs[:, ix].T
# spatial patterns
self.patterns_ = linalg.pinv(evecs).T # n_channels x n_channels
self.filters_ = evecs # n_channels x n_channels
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in X])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, X):
"""Estimate epochs sources given the SPoC filters.
Parameters
----------
X : array, shape (n_epochs, n_channels, n_times)
The data.
Returns
-------
X : ndarray
            If self.transform_into == 'average_power' then returns the power of
            CSP features averaged over time and shape (n_epochs, n_sources).
            If self.transform_into == 'csp_space' then returns the data in CSP
            space and shape is (n_epochs, n_sources, n_times).
"""
return super(SPoC, self).transform(X)
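# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# fitting CSP and SPoC on synthetic epochs. All names below (X_demo, y_demo,
# ...) are illustrative; real use would pass band-pass filtered M/EEG epochs.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    X_demo = rng.randn(40, 8, 200)            # (n_epochs, n_channels, n_times)
    y_demo = np.repeat([0, 1], 20)            # two balanced classes
    X_demo[y_demo == 1, 0] *= 2.0             # class 1 is stronger on channel 0
    csp = CSP(n_components=4, norm_trace=False)
    csp_feats = csp.fit(X_demo, y_demo).transform(X_demo)
    print(csp_feats.shape)                    # expected: (40, 4) average power
    # SPoC uses a continuous target instead of class labels.
    target = rng.randn(40)
    spoc = SPoC(n_components=2)
    spoc_feats = spoc.fit(X_demo, target).transform(X_demo)
    print(spoc_feats.shape)                   # expected: (40, 2)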
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/sparse/test_dtype.py | 2 | 4069 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
@pytest.mark.parametrize("dtype, fill_value", [
('int', 0),
('float', np.nan),
('bool', False),
('object', np.nan),
('datetime64[ns]', pd.NaT),
('timedelta64[ns]', pd.NaT),
])
def test_inferred_dtype(dtype, fill_value):
sparse_dtype = SparseDtype(dtype)
result = sparse_dtype.fill_value
if pd.isna(fill_value):
assert pd.isna(result) and type(result) == type(fill_value)
else:
assert result == fill_value
def test_from_sparse_dtype():
dtype = SparseDtype('float', 0)
result = SparseDtype(dtype)
assert result.fill_value == 0
def test_from_sparse_dtype_fill_value():
dtype = SparseDtype('int', 1)
result = SparseDtype(dtype, fill_value=2)
expected = SparseDtype('int', 2)
assert result == expected
@pytest.mark.parametrize('dtype, fill_value', [
('int', None),
('float', None),
('bool', None),
('object', None),
('datetime64[ns]', None),
('timedelta64[ns]', None),
('int', np.nan),
('float', 0),
])
def test_equal(dtype, fill_value):
a = SparseDtype(dtype, fill_value)
b = SparseDtype(dtype, fill_value)
assert a == b
assert b == a
def test_nans_equal():
a = SparseDtype(float, float('nan'))
b = SparseDtype(float, np.nan)
assert a == b
assert b == a
@pytest.mark.parametrize('a, b', [
(SparseDtype('float64'), SparseDtype('float32')),
(SparseDtype('float64'), SparseDtype('float64', 0)),
(SparseDtype('float64'), SparseDtype('datetime64[ns]', np.nan)),
(SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)),
(SparseDtype('float64'), np.dtype('float64')),
])
def test_not_equal(a, b):
assert a != b
def test_construct_from_string_raises():
with pytest.raises(TypeError):
SparseDtype.construct_from_string('not a dtype')
@pytest.mark.parametrize("dtype, expected", [
(SparseDtype(int), True),
(SparseDtype(float), True),
(SparseDtype(bool), True),
(SparseDtype(object), False),
(SparseDtype(str), False),
])
def test_is_numeric(dtype, expected):
assert dtype._is_numeric is expected
def test_str_uses_object():
result = SparseDtype(str).subtype
assert result == np.dtype('object')
@pytest.mark.parametrize("string, expected", [
('Sparse[float64]', SparseDtype(np.dtype('float64'))),
('Sparse[float32]', SparseDtype(np.dtype('float32'))),
('Sparse[int]', SparseDtype(np.dtype('int'))),
('Sparse[str]', SparseDtype(np.dtype('str'))),
('Sparse[datetime64[ns]]', SparseDtype(np.dtype('datetime64[ns]'))),
("Sparse", SparseDtype(np.dtype("float"), np.nan))
])
def test_construct_from_string(string, expected):
result = SparseDtype.construct_from_string(string)
assert result == expected
@pytest.mark.parametrize("a, b, expected", [
(SparseDtype(float, 0.0), SparseDtype(np.dtype('float'), 0.0), True),
(SparseDtype(int, 0), SparseDtype(int, 0), True),
(SparseDtype(float, float('nan')), SparseDtype(float, np.nan), True),
(SparseDtype(float, 0), SparseDtype(float, np.nan), False),
(SparseDtype(int, 0.0), SparseDtype(float, 0.0), False),
])
def test_hash_equal(a, b, expected):
result = a == b
assert result is expected
result = hash(a) == hash(b)
assert result is expected
@pytest.mark.parametrize('string, expected', [
('Sparse[int]', 'int'),
('Sparse[int, 0]', 'int'),
('Sparse[int64]', 'int64'),
('Sparse[int64, 0]', 'int64'),
('Sparse[datetime64[ns], 0]', 'datetime64[ns]'),
])
def test_parse_subtype(string, expected):
subtype, _ = SparseDtype._parse_subtype(string)
assert subtype == expected
@pytest.mark.parametrize("string", [
"Sparse[int, 1]",
"Sparse[float, 0.0]",
"Sparse[bool, True]",
])
def test_construct_from_string_fill_value_raises(string):
with tm.assert_raises_regex(TypeError, 'fill_value in the string is not'):
SparseDtype.construct_from_string(string)
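# ----------------------------------------------------------------------------
# Illustrative aside (not part of the original test module): the behaviour
# exercised above reduces to constructing a SparseDtype and comparing it, e.g.
if __name__ == '__main__':
    dtype = SparseDtype('float64', fill_value=0.0)
    print(dtype.fill_value)                                 # 0.0
    print(dtype == SparseDtype(np.dtype('float64'), 0.0))   # True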
| bsd-3-clause |
mne-tools/mne-python | examples/stats/linear_regression_raw.py | 18 | 2385 | """
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rER[P/F]s - regressing the continuous data - are a
generalisation of traditional averaging. If all preprocessing steps
are the same, no overlap between epochs exists, and if all
predictors are binary, regression is virtually identical to traditional
averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate effects, including those of
continuous predictors.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression_raw
# Load and preprocess data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg='grad', stim=True, eeg=False).load_data()
raw.filter(1, None, fir_design='firwin') # high-pass
# Set up events
events = mne.find_events(raw)
event_id = {'Aud/L': 1, 'Aud/R': 2}
tmin, tmax = -.1, .5
# regular epoching
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
reject=None, tmin=tmin, tmax=tmax)
# linear_regression_raw returns a dict of evokeds
# select conditions similarly to mne.Epochs objects
# plot both results, and their difference
cond = "Aud/L"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)),
time_unit='s')
epochs[cond].average().plot(axes=ax1, **params)
evokeds[cond].plot(axes=ax2, **params)
contrast = mne.combine_evoked([evokeds[cond], epochs[cond].average()],
weights=[1, -1])
contrast.plot(axes=ax3, **params)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
ax3.set_title("Difference")
plt.show()
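# ----------------------------------------------------------------------------
# Illustrative aside (not part of the original example): linear_regression_raw
# returns a dict mapping condition names to Evoked objects, so the rERF can be
# inspected with the same API as the classical average computed above.
print(sorted(evokeds.keys()))                          # e.g. ['Aud/L', 'Aud/R']
print(type(evokeds[cond]), evokeds[cond].data.shape)   # (n_channels, n_times)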
| bsd-3-clause |
kelle/astropy | astropy/visualization/wcsaxes/core.py | 1 | 21216 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import numpy as np
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
from ...coordinates import SkyCoord, BaseCoordinateFrame
from ...wcs import WCS
from ...wcs.utils import wcs_to_celestial_frame
from ...extern import six
from .transforms import (WCSPixel2WorldTransform, WCSWorld2PixelTransform,
CoordinateTransform)
from .coordinates_map import CoordinatesMap
from .utils import get_coord_meta
from .frame import EllipticalFrame, RectangularFrame
__all__ = ['WCSAxes', 'WCSAxesSubplot']
VISUAL_PROPERTIES = ['facecolor', 'edgecolor', 'linewidth', 'alpha', 'linestyle']
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [1., 1.]
IDENTITY.wcs.crpix = [1., 1.]
IDENTITY.wcs.cdelt = [1., 1.]
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
rect : list
The position of the axes in the figure in relative units. Should be
given as ``[left, bottom, width, height]``.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances.
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
"""
def __init__(self, fig, rect, wcs=None, transform=None, coord_meta=None,
transData=None, slices=None, frame_class=RectangularFrame,
**kwargs):
super(WCSAxes, self).__init__(fig, rect, **kwargs)
self._bboxes = []
self.frame_class = frame_class
        if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect('key_press_event', self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return "%s %s (pixel)" % (x, y)
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
xw = coords[self._x_index].format_coord(world[self._x_index])
yw = coords[self._y_index].format_coord(world[self._y_index])
if self._display_coords_index == 0:
system = "world"
else:
system = "world, overlay {0}".format(self._display_coords_index)
coord_string = "%s %s (%s)" % (xw, yw, system)
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == 'w':
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
"""
origin = kwargs.get('origin', None)
if origin == 'upper':
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
# To check whether the image is a PIL image we can check if the data
# has a 'getpixel' attribute - this is what Matplotlib's AxesImage does
try:
from PIL.Image import Image, FLIP_TOP_BOTTOM
except ImportError:
# We don't need to worry since PIL is not installed, so user cannot
# have passed RGB image.
pass
else:
if isinstance(X, Image) or hasattr(X, 'getpixel'):
X = X.transpose(FLIP_TOP_BOTTOM)
kwargs['origin'] = 'lower'
return super(WCSAxes, self).imshow(X, *args, **kwargs)
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot : This method is called from this function with all arguments passed to it.
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
plot_data = []
for coord in self.coords:
if coord.coord_type == 'longitude':
plot_data.append(frame0.data.lon.to_value(coord.coord_unit))
elif coord.coord_type == 'latitude':
plot_data.append(frame0.data.lat.to_value(coord.coord_unit))
else:
raise NotImplementedError("Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude.")
if 'transform' in kwargs.keys():
raise TypeError("The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame.")
transform = self.get_transform(frame0)
kwargs.update({'transform': transform})
args = tuple(plot_data) + args[1:]
super(WCSAxes, self).plot(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
            # We now explicitly call 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
wcs.wcs.set()
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, 'coords'):
previous_frame = {'path': self.coords.frame._path,
'color': self.coords.frame.get_color(),
'linewidth': self.coords.frame.get_linewidth()}
else:
previous_frame = {'path': None}
self.coords = CoordinatesMap(self, wcs=self.wcs, slice=slices,
transform=transform, coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame['path'])
if previous_frame['path'] is not None:
self.coords.frame.set_color(previous_frame['color'])
self.coords.frame.set_linewidth(previous_frame['linewidth'])
self._all_coords = [self.coords]
if slices is None:
self.slices = ('x', 'y')
self._x_index = 0
self._y_index = 1
else:
self.slices = slices
self._x_index = self.slices.index('x')
self._y_index = self.slices.index('y')
# Common default settings for Rectangular Frame
if self.frame_class is RectangularFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('b')
self.coords[coord_index].set_ticklabel_position('b')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_axislabel_position('l')
self.coords[coord_index].set_ticklabel_position('l')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
# Common default settings for Elliptical Frame
elif self.frame_class is EllipticalFrame:
for coord_index in range(len(self.slices)):
if self.slices[coord_index] == 'x':
self.coords[coord_index].set_axislabel_position('h')
self.coords[coord_index].set_ticklabel_position('h')
self.coords[coord_index].set_ticks_position('h')
elif self.slices[coord_index] == 'y':
self.coords[coord_index].set_ticks_position('c')
self.coords[coord_index].set_axislabel_position('c')
self.coords[coord_index].set_ticklabel_position('c')
else:
self.coords[coord_index].set_axislabel_position('')
self.coords[coord_index].set_ticklabel_position('')
self.coords[coord_index].set_ticks_position('')
def draw(self, renderer, inframe=False):
# In Axes.draw, the following code can result in the xlim and ylim
        # values changing, so we need to call this here explicitly to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
# We need to make sure that that frame path is up to date
self.coords.frame._update_patch_path()
super(WCSAxes, self).draw(renderer, inframe)
        # Here we need to find out the range of all coordinates, and update
        # the range for each coordinate axis. For now, just assume it covers
        # the whole sky.
self._bboxes = []
self._ticklabels_bbox = []
visible_ticks = []
for coords in self._all_coords:
coords.frame.update()
for coord in coords:
coord._draw(renderer, bboxes=self._bboxes,
ticklabels_bbox=self._ticklabels_bbox)
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
for coord in coords:
coord._draw_axislabels(renderer, bboxes=self._bboxes,
ticklabels_bbox=self._ticklabels_bbox,
visible_ticks=visible_ticks)
self.coords.frame.draw(renderer)
self._drawn = True
def set_xlabel(self, label, labelpad=1, **kwargs):
self.coords[self._x_index].set_axislabel(label, minpad=labelpad, **kwargs)
def set_ylabel(self, label, labelpad=1, **kwargs):
self.coords[self._y_index].set_axislabel(label, minpad=labelpad, **kwargs)
def get_xlabel(self):
return self.coords[self._x_index].get_axislabel()
def get_ylabel(self):
return self.coords[self._y_index].get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
coords = CoordinatesMap(self, frame, frame_class=self.frame_class)
else:
if coord_meta is None:
coord_meta = get_coord_meta(frame)
transform = self._get_transform_no_transdata(frame)
coords = CoordinatesMap(self, transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position('t')
coords[1].set_axislabel_position('r')
coords[0].set_ticklabel_position('t')
coords[1].set_ticklabel_position('r')
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
This does not include the transData transformation
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if self.wcs is None and frame != 'pixel':
raise ValueError('No WCS specified, so only pixel coordinates are available')
if isinstance(frame, WCS):
coord_in = wcs_to_celestial_frame(self.wcs)
coord_out = wcs_to_celestial_frame(frame)
if coord_in == coord_out:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
WCSWorld2PixelTransform(frame))
else:
return (WCSPixel2WorldTransform(self.wcs, slice=self.slices) +
CoordinateTransform(self.wcs, frame) +
WCSWorld2PixelTransform(frame))
elif frame == 'pixel':
return Affine2D()
elif isinstance(frame, Transform):
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
return pixel2world + frame
else:
pixel2world = WCSPixel2WorldTransform(self.wcs, slice=self.slices)
if frame == 'world':
return pixel2world
else:
coordinate_transform = CoordinateTransform(self.wcs, frame)
if coordinate_transform.same_frames:
return pixel2world
else:
return pixel2world + CoordinateTransform(self.wcs, frame)
def get_tightbbox(self, renderer):
if not self.get_visible():
return
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis='both', **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments. This behaves like `matplotlib.axes.Axes`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
"""
if not hasattr(self, 'coords'):
return
which = kwargs.pop('which', 'major')
if which != 'major':
raise NotImplementedError('Plotting the grid for the minor ticks is '
'not supported.')
if axis == 'both':
self.coords.grid(draw_grid=b, **kwargs)
elif axis == 'x':
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == 'y':
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError('axis should be one of x/y/both')
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
    A subplot class for WCSAxes
"""
pass
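# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): WCSAxes is typically
# created via ``plt.subplot(projection=wcs)``, but it can also be added to a
# figure explicitly, as below. The tangent-plane WCS here is hypothetical and
# purely illustrative.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo_wcs = WCS(naxis=2)
    demo_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    demo_wcs.wcs.crval = [10., 20.]
    demo_wcs.wcs.crpix = [50., 50.]
    demo_wcs.wcs.cdelt = [-0.01, 0.01]
    fig = plt.figure()
    ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=demo_wcs)
    fig.add_axes(ax)               # the axes must be added to the figure manually
    ax.grid(True)                  # draw the coordinate grid
    ax.set_xlabel('Right Ascension')
    ax.set_ylabel('Declination')
    # plt.show()                   # uncomment for an interactive window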
| bsd-3-clause |
calico/basenji | tests/test_train.py | 1 | 6235 | #!/usr/bin/env python
from optparse import OptionParser
import glob
import os
import shutil
import unittest
import numpy as np
import pandas as pd
from scipy.stats import mannwhitneyu
from scipy.stats import ttest_ind
import slurm
class TestTrain(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.params_file = 'train/params.json'
cls.data_dir = 'train/data'
cls.ref_dir = 'train/ref'
cls.iterations = 4
cls.basenji_path = '/home/drk/code/basenji2/bin'
cls.conda_env = 'tf1.15-gpu2'
cls.queue = 'gtx1080ti'
def test_train(self):
exp_dir = 'train/exp'
if os.path.isdir(exp_dir):
shutil.rmtree(exp_dir)
os.mkdir(exp_dir)
################################################################
# train
################################################################
jobs = []
for i in range(self.iterations):
it_dir = '%s/%d' % (exp_dir, i)
os.mkdir(it_dir)
# basenji train
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % self.conda_env
basenji_cmd += ' %s/basenji_train.py' % self.basenji_path
basenji_cmd += ' -o %s/train' % it_dir
basenji_cmd += ' %s' % self.params_file
basenji_cmd += ' %s' % self.data_dir
basenji_job = slurm.Job(basenji_cmd,
name='train%d' % i,
out_file='%s/train.out'%it_dir,
err_file='%s/train.err'%it_dir,
queue=self.queue,
cpu=1,
gpu=1,
mem=23000,
time='2-00:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# test check
################################################################
jobs = []
for i in range(self.iterations):
it_dir = '%s/%d' % (exp_dir, i)
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % self.conda_env
basenji_cmd += ' %s/basenji_test.py' % self.basenji_path
basenji_cmd += ' -o %s/test_train' % it_dir
basenji_cmd += ' --tfr "train-*.tfr"'
basenji_cmd += ' %s' % self.params_file
basenji_cmd += ' %s/train/model_check.h5' % it_dir
basenji_cmd += ' %s' % self.data_dir
basenji_job = slurm.Job(basenji_cmd,
name='test%d' % i,
out_file='%s/test_train.out'%it_dir,
err_file='%s/test_train.err'%it_dir,
queue=self.queue,
cpu=1,
gpu=1,
mem=23000,
time='1:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# test best
################################################################
jobs = []
for i in range(self.iterations):
it_dir = '%s/%d' % (exp_dir, i)
# basenji test
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % self.conda_env
basenji_cmd += ' %s/basenji_test.py' % self.basenji_path
basenji_cmd += ' -o %s/test' % it_dir
basenji_cmd += ' %s' % self.params_file
basenji_cmd += ' %s/train/model_best.h5' % it_dir
basenji_cmd += ' %s' % self.data_dir
basenji_job = slurm.Job(basenji_cmd,
name='test%d' % i,
out_file='%s/test.out'%it_dir,
err_file='%s/test.err'%it_dir,
queue=self.queue,
cpu=1,
gpu=1,
mem=23000,
time='1:00:00')
jobs.append(basenji_job)
slurm.multi_run(jobs, verbose=True)
################################################################
# compare checkpoint on training set
################################################################
ref_cors = []
for acc_file in glob.glob('%s/*/test_train/acc.txt' % self.ref_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
ref_cors.append(acc_df.pearsonr.mean())
exp_cors = []
for acc_file in glob.glob('%s/*/test_train/acc.txt' % exp_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
exp_cors.append(acc_df.pearsonr.mean())
_, mwp = mannwhitneyu(ref_cors, exp_cors, alternative='two-sided')
_, tp = ttest_ind(ref_cors, exp_cors)
print('\nTrain:')
print('Reference PearsonR: %.4f (%.4f)' % (np.mean(ref_cors), np.std(ref_cors)))
print('Experiment PearsonR: %.4f (%.4f)' % (np.mean(exp_cors), np.std(exp_cors)))
print('Mann-Whitney U p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
# self.assertGreater(mwp, 0.05)
# self.assertGreater(tp, 0.05)
################################################################
# compare best on test set
################################################################
ref_cors = []
for acc_file in glob.glob('%s/*/test/acc.txt' % self.ref_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
ref_cors.append(acc_df.pearsonr.mean())
exp_cors = []
for acc_file in glob.glob('%s/*/test/acc.txt' % exp_dir):
acc_df = pd.read_csv(acc_file, sep='\t', index_col=0)
exp_cors.append(acc_df.pearsonr.mean())
_, mwp = mannwhitneyu(ref_cors, exp_cors, alternative='two-sided')
_, tp = ttest_ind(ref_cors, exp_cors)
print('\nTest:')
print('Reference PearsonR: %.4f (%.4f)' % (np.mean(ref_cors), np.std(ref_cors)))
print('Experiment PearsonR: %.4f (%.4f)' % (np.mean(exp_cors), np.std(exp_cors)))
print('Mann-Whitney U p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
# self.assertGreater(mwp, 0.05)
# self.assertGreater(tp, 0.05)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
altairpearl/scikit-learn | sklearn/model_selection/_split.py | 3 | 61154 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav R V <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'LabelShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, labels):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, labels=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, labels)
"""
for test_index in self._iter_test_indices(X, y, labels):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, labels=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
    Due to the high number of test sets (which is the same as the
    number of samples), this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def _iter_test_indices(self, X, y=None, labels=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with
    the number of samples, this cross-validation method can be very costly.
    For large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
    or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, labels=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, LabelKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, labels):
yield train, test
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
    Provides train/test indices to split data in train/test sets. Split the
    dataset into k consecutive folds (without shuffling by default).
    Each fold is then used once as a validation set while the k - 1
    remaining folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes label information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
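        # Small illustrative trace: with n_samples=10 and n_splits=3,
        # fold_sizes starts as [3, 3, 3] and 10 % 3 == 1, so the first fold
        # grows to 4, giving folds of size [4, 3, 3] (as the Notes describe).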
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(n_splits=2)
>>> label_kfold.get_n_splits(X, y, labels)
2
>>> print(label_kfold)
LabelKFold(n_splits=2)
>>> for train_index, test_index in label_kfold.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(LabelKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if self.n_splits > n_labels:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of labels: %d."
% (self.n_splits, n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
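        # Small illustrative trace: with labels [0, 0, 0, 1, 1, 2] and
        # n_splits=2, the per-label counts [3, 2, 1] are assigned largest
        # first: label 0 -> fold 0, label 1 -> fold 1, label 2 -> fold 1,
        # yielding two folds of three samples each.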
indices = label_to_fold[labels]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last one
    contains the remaining samples.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, labels=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_labels = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_labels for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_splits=%d."
% (min_labels, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
        # NOTE: Passing the data corresponding to the ith class, say
        # X[y==class_i], will break when the data is not 100% stratifiable
        # for all classes. So we pass np.zeros(max(c, n_splits)) as data
        # to the KFold.
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, labels=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedKFold, self).split(X, y, labels)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, the test indices must be higher than the training indices,
    and thus shuffling within this cross-validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns the first k folds as the train set and the
    (k+1)th fold as the test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
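        # e.g. with n_samples=6 and n_splits=3: n_folds=4, test_size=1 and
        # test_starts=range(3, 6), so the splits are ([0 1 2], [3]),
        # ([0 1 2 3], [4]) and ([0 1 2 3 4], [5]).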
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
class LeaveOneLabelOut(BaseCrossValidator):
"""Leave One Label Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = LeaveOneLabelOut()
>>> lol.get_n_splits(X, y, labels)
2
>>> print(lol)
LeaveOneLabelOut()
>>> for train_index, test_index in lol.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
# We make a copy of labels to avoid side-effects during iteration
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
for i in unique_labels:
yield labels == i
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return len(np.unique(labels))
class LeavePLabelOut(BaseCrossValidator):
"""Leave P Labels Out cross-validator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePLabelOut and LeaveOneLabelOut is that the
    former builds the test sets from all the samples assigned to ``p``
    different values of the labels, while the latter builds each test set
    from the samples assigned to a single label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_labels : int
Number of labels (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePLabelOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = LeavePLabelOut(n_labels=2)
>>> lpl.get_n_splits(X, y, labels)
3
>>> print(lpl)
LeavePLabelOut(n_labels=2)
>>> for train_index, test_index in lpl.split(X, y, labels):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n_labels):
self.n_labels = n_labels
def _iter_test_masks(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
labels = np.array(labels, copy=True)
unique_labels = np.unique(labels)
combi = combinations(range(len(unique_labels)), self.n_labels)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_labels[np.array(indices)]:
test_index[labels == l] = True
yield test_index
def get_n_splits(self, X, y, labels):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if labels is None:
raise ValueError("The labels parameter should not be None")
return int(comb(len(np.unique(labels)), self.n_labels, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, labels = indexable(X, y, labels)
for train, test in self._iter_indices(X, y, labels):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, labels=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class LabelShuffleSplit(ShuffleSplit):
'''Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePLabelOut and LabelShuffleSplit is that the
    former generates splits using all subsets of ``p`` unique labels, whereas
    LabelShuffleSplit generates a user-determined number of random test
    splits, each with a user-determined fraction of the unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(p=10)`` would be
``LabelShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
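    Examples
    --------
    A minimal usage sketch; the indices drawn depend on the random state,
    so only a deterministic invariant is asserted here.
    >>> import numpy as np
    >>> from sklearn.model_selection import LabelShuffleSplit
    >>> X = np.ones((8, 2))
    >>> labels = np.array([1, 1, 2, 2, 3, 3, 4, 4])
    >>> lss = LabelShuffleSplit(n_splits=4, test_size=0.5, random_state=0)
    >>> lss.get_n_splits()
    4
    >>> for train, test in lss.split(X, labels=labels):
    ...     # a label never appears in both the train and the test split
    ...     assert set(labels[train]).isdisjoint(labels[test])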
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(LabelShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, labels):
if labels is None:
raise ValueError("The labels parameter should not be None")
classes, label_indices = np.unique(labels, return_inverse=True)
for label_train, label_test in super(
LabelShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(label_indices, label_train))
test = np.flatnonzero(np.in1d(label_indices, label_test))
yield train, test
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, labels=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
p_i = class_counts / float(n_samples)
n_i = np.round(n_train * p_i).astype(int)
t_i = np.minimum(class_counts - n_i,
np.round(n_test * p_i).astype(int))
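        # e.g. with class counts [6, 4], n_train=7 and n_test=3:
        # p_i = [0.6, 0.4], n_i = round(7 * p_i) = [4, 3] and
        # t_i = min([6, 4] - n_i, round(3 * p_i)) = [2, 1], so the sampled
        # train/test sets already hold the requested 7 and 3 samples.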
for _ in range(self.n_splits):
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (n_train and n_test do not split
            # evenly across the classes), we may end up here with fewer
            # samples in train and test than asked for.
if len(train) + len(test) < n_train + n_test:
                # We complete by randomly assigning the missing indices
missing_indices = np.where(bincount(train + test,
minlength=len(y)) == 0)[0]
missing_indices = rng.permutation(missing_indices)
n_missing_train = n_train - len(train)
n_missing_test = n_test - len(test)
if n_missing_train > 0:
train.extend(missing_indices[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_indices[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedShuffleSplit, self).split(X, y, labels)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is
    known only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check whether the train/test sizes are meaningful
    with respect to the size of the data (n_samples).
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i'
and test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i'
and train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = cv
def get_n_splits(self, X=None, y=None, labels=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv) # Both iterables and old-cv objects support len
def split(self, X=None, y=None, labels=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
labels : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
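# For reference, a quick sketch of the dispatch above: check_cv(5, y,
# classifier=True) yields StratifiedKFold(5) for a binary or multiclass y,
# check_cv(5) alone yields KFold(5), and a plain iterable of (train, test)
# index pairs is wrapped in _CVIterableWrapper so it exposes the same
# split/get_n_splits interface.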
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation and
    ``next(ShuffleSplit().split(X, y))``, applying them to the input data in
    a single call for splitting (and optionally subsampling) the data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a problem with nosetests
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and
not isinstance(estimator.kernel, GPKernel)):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[index] for index in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
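# ---------------------------------------------------------------------------
# A minimal, self-contained usage sketch of the public helpers defined above,
# guarded so it only runs when this module is executed directly; the toy data
# is made up purely for illustration.
if __name__ == '__main__':
    X_demo = np.arange(20).reshape(10, 2)
    y_demo = np.array([0, 1] * 5)
    # check_cv picks StratifiedKFold for a classifier with a binary target
    cv = check_cv(cv=5, y=y_demo, classifier=True)
    for train_idx, test_idx in cv.split(X_demo, y_demo):
        assert len(train_idx) + len(test_idx) == len(y_demo)
    # train_test_split resolves test_size=0.3 to ceil(0.3 * 10) = 3 samples
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.3,
                                              random_state=0)
    assert len(X_tr) == 7 and len(X_te) == 3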
| bsd-3-clause |
jeicher/cobrapy | cobra/test/test_flux_analysis.py | 1 | 18750 | import pytest
import sys
from os import name
from contextlib import contextmanager
import re
from six import iteritems, StringIO
from cobra.core import Model, Reaction, Metabolite
from cobra.solvers import solver_dict, get_solver_name
from cobra.flux_analysis import *
from cobra.solvers import SolverNotFound
from .conftest import model, large_model, solved_model, fva_results
try:
import numpy
except ImportError:
numpy = None
try:
import matplotlib
except ImportError:
matplotlib = None
try:
import pandas
except ImportError:
pandas = None
try:
import tabulate
except ImportError:
tabulate = None
@contextmanager
def captured_output():
""" A context manager to test the IO summary methods """
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
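# Typical use: wrap a call that prints to stdout (e.g. one of the model
# summary methods) and then inspect the captured text via out.getvalue().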
class TestCobraFluxAnalysis:
"""Test the simulation functions in cobra.flux_analysis"""
@pytest.mark.parametrize("solver", list(solver_dict))
def test_pfba_benchmark(self, large_model, benchmark, solver):
benchmark(optimize_minimal_flux, large_model, solver=solver)
@pytest.mark.parametrize("solver", list(solver_dict))
def test_pfba(self, model, solver):
optimize_minimal_flux(model, solver=solver)
abs_x = [abs(i) for i in model.solution.x]
assert model.solution.status == "optimal"
assert abs(model.solution.f - 0.8739) < 0.001
assert abs(sum(abs_x) - 518.4221) < 0.001
# Test desired_objective_value
desired_objective = 0.8
optimize_minimal_flux(model, solver=solver,
desired_objective_value=desired_objective)
abs_x = [abs(i) for i in model.solution.x]
assert model.solution.status == "optimal"
assert abs(model.solution.f - desired_objective) < 0.001
assert abs(sum(abs_x) - 476.1594) < 0.001
# Test fraction_of_optimum
optimize_minimal_flux(model, solver=solver,
fraction_of_optimum=0.95)
abs_x = [abs(i) for i in model.solution.x]
assert model.solution.status == "optimal"
assert abs(model.solution.f - 0.95 * 0.8739) < 0.001
assert abs(sum(abs_x) - 493.4400) < 0.001
# Make sure the model works for non-unity objective values
model.reactions.Biomass_Ecoli_core.objective_coefficient = 2
optimize_minimal_flux(model, solver=solver)
assert abs(model.solution.f - 2 * 0.8739) < 0.001
model.reactions.Biomass_Ecoli_core.objective_coefficient = 1
# Test some erroneous inputs -- multiple objectives
model.reactions.ATPM.objective_coefficient = 1
with pytest.raises(ValueError):
optimize_minimal_flux(model, solver=solver)
model.reactions.ATPM.objective_coefficient = 0
# Minimization of objective
with pytest.raises(ValueError):
optimize_minimal_flux(model, solver=solver,
objective_sense='minimize')
# Infeasible solution
atpm = float(model.reactions.ATPM.lower_bound)
model.reactions.ATPM.lower_bound = 500
with pytest.raises(ValueError):
optimize_minimal_flux(model, solver=solver)
model.reactions.ATPM.lower_bound = atpm
def test_single_gene_deletion_fba_benchmark(self, large_model, benchmark):
genes = ['b0511', 'b2521', 'b0651', 'b2502', 'b3132', 'b1486', 'b3384',
'b4321', 'b3428', 'b2789', 'b0052', 'b0115',
'b2167', 'b0759', 'b3389', 'b4031', 'b3916', 'b2374', 'b0677',
'b2202']
benchmark(single_gene_deletion, large_model, gene_list=genes)
def test_single_gene_deletion_fba(self, model):
# expected knockouts for textbook model
growth_dict = {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
"b2276": 0.21, "b1779": 0.00}
rates, statuses = single_gene_deletion(model,
gene_list=growth_dict.keys(),
method="fba")
for gene, expected_value in iteritems(growth_dict):
assert statuses[gene] == 'optimal'
assert abs(rates[gene] - expected_value) < 0.01
def test_single_gene_deletion_moma_benchmark(self, large_model, benchmark):
try:
get_solver_name(qp=True)
except SolverNotFound:
pytest.skip("no qp support")
genes = ['b1764', 'b0463', 'b1779', 'b0417']
benchmark(single_gene_deletion, large_model, gene_list=genes,
method="moma")
def test_single_gene_deletion_moma(self, model):
try:
get_solver_name(qp=True)
except SolverNotFound:
pytest.skip("no qp support")
# expected knockouts for textbook model
growth_dict = {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
"b2276": 0.11, "b1779": 0.00}
rates, statuses = single_gene_deletion(model,
gene_list=growth_dict.keys(),
method="moma")
for gene, expected_value in iteritems(growth_dict):
assert statuses[gene] == 'optimal'
assert abs(rates[gene] - expected_value) < 0.01
def test_single_gene_deletion_benchmark(self, large_model, benchmark):
reactions = ['CDPMEK', 'PRATPP', 'HISTD', 'PPCDC']
benchmark(single_reaction_deletion, large_model,
reaction_list=reactions)
def test_single_reaction_deletion(self, model):
expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
results, status = single_reaction_deletion(
model, reaction_list=expected_results.keys())
assert len(results) == 6
assert len(status) == 6
for status_value in status.values():
assert status_value == "optimal"
for reaction, value in results.items():
assert abs(value - expected_results[reaction]) < 0.00001
@classmethod
def compare_matrices(cls, matrix1, matrix2, places=3):
nrows = len(matrix1)
ncols = len(matrix1[0])
assert nrows == len(matrix2)
assert ncols == len(matrix2[0])
for i in range(nrows):
for j in range(ncols):
assert abs(matrix1[i][j] - matrix2[i][j]) < 10 ** -places
@pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
def test_double_gene_deletion_benchmark(self, large_model, benchmark):
genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276",
"b1241"]
benchmark(double_gene_deletion, large_model, gene_list1=genes)
@pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
def test_double_gene_deletion(self, model):
genes = ["b0726", "b4025", "b0724", "b0720", "b2935", "b2935", "b1276",
"b1241"]
growth_list = [
[0.858, 0.857, 0.814, 0.000, 0.858, 0.858, 0.858, 0.858],
[0.857, 0.863, 0.739, 0.000, 0.863, 0.863, 0.863, 0.863],
[0.814, 0.739, 0.814, 0.000, 0.814, 0.814, 0.814, 0.814],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874]]
opts = {"number_of_processes": 1} if name == "nt" else {}
solution = double_gene_deletion(model, gene_list1=genes, **opts)
assert solution["x"] == genes
assert solution["y"] == genes
self.compare_matrices(growth_list, solution["data"])
# test when lists differ slightly
solution = double_gene_deletion(model, gene_list1=genes[:-1],
gene_list2=genes,
number_of_processes=1)
assert solution["x"] == genes[:-1]
assert solution["y"] == genes
self.compare_matrices(growth_list[:-1], solution["data"])
@pytest.mark.skipif(numpy is None, reason="double deletions require numpy")
def test_double_reaction_deletion(self, model):
reactions = ['FBA', 'ATPS4r', 'ENO', 'FRUpts2']
growth_list = [[0.704, 0.135, 0.000, 0.704],
[0.135, 0.374, 0.000, 0.374],
[0.000, 0.000, 0.000, 0.000],
[0.704, 0.374, 0.000, 0.874]]
solution = double_reaction_deletion(model,
reaction_list1=reactions,
number_of_processes=1)
assert solution["x"] == reactions
assert solution["y"] == reactions
self.compare_matrices(growth_list, solution["data"])
@pytest.mark.parametrize("solver", list(solver_dict))
def test_flux_variability_benchmark(self, large_model, benchmark, solver):
benchmark(flux_variability_analysis, large_model, solver=solver,
reaction_list=large_model.reactions[1::3])
@pytest.mark.parametrize("solver", list(solver_dict))
def test_flux_variability(self, model, fva_results, solver):
if solver == "esolver":
pytest.skip("esolver too slow...")
fva_out = flux_variability_analysis(
model, solver=solver, reaction_list=model.reactions[1::3])
for name, result in iteritems(fva_out):
for k, v in iteritems(result):
assert abs(fva_results[name][k] - v) < 0.00001
def test_fva_infeasible(self, model):
infeasible_model = model.copy()
infeasible_model.reactions.get_by_id("EX_glc__D_e").lower_bound = 0
# ensure that an infeasible model does not run FVA
with pytest.raises(ValueError):
flux_variability_analysis(infeasible_model)
def test_find_blocked_reactions(self, model):
result = find_blocked_reactions(model, model.reactions[40:46])
assert result == ['FRUpts2']
result = find_blocked_reactions(model, model.reactions[42:48])
assert set(result) == {'FUMt2_2', 'FRUpts2'}
result = find_blocked_reactions(model, model.reactions[30:50],
open_exchanges=True)
assert result == []
@classmethod
def construct_ll_test_model(cls):
test_model = Model()
test_model.add_metabolites(Metabolite("A"))
test_model.add_metabolites(Metabolite("B"))
test_model.add_metabolites(Metabolite("C"))
EX_A = Reaction("EX_A")
EX_A.add_metabolites({test_model.metabolites.A: 1})
DM_C = Reaction("DM_C")
DM_C.add_metabolites({test_model.metabolites.C: -1})
v1 = Reaction("v1")
v1.add_metabolites({test_model.metabolites.A: -1,
test_model.metabolites.B: 1})
v2 = Reaction("v2")
v2.add_metabolites({test_model.metabolites.B: -1,
test_model.metabolites.C: 1})
v3 = Reaction("v3")
v3.add_metabolites({test_model.metabolites.C: -1,
test_model.metabolites.A: 1})
DM_C.objective_coefficient = 1
test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
return test_model
def test_loopless_benchmark(self, benchmark):
test_model = self.construct_ll_test_model()
benchmark(lambda: construct_loopless_model(test_model).optimize())
def test_loopless(self):
try:
get_solver_name(mip=True)
except SolverNotFound:
pytest.skip("no MILP solver found")
test_model = self.construct_ll_test_model()
feasible_sol = construct_loopless_model(test_model).optimize()
test_model.reactions.get_by_id('v3').lower_bound = 1
infeasible_sol = construct_loopless_model(test_model).optimize()
assert feasible_sol.status == "optimal"
assert infeasible_sol.status == "infeasible"
def test_gapfilling(self):
try:
get_solver_name(mip=True)
except SolverNotFound:
pytest.skip("no MILP solver found")
m = Model()
m.add_metabolites(map(Metabolite, ["a", "b", "c"]))
r = Reaction("EX_A")
m.add_reaction(r)
r.add_metabolites({m.metabolites.a: 1})
r = Reaction("r1")
m.add_reaction(r)
r.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
r = Reaction("DM_C")
m.add_reaction(r)
r.add_metabolites({m.metabolites.c: -1})
r.objective_coefficient = 1
U = Model()
r = Reaction("a2b")
U.add_reaction(r)
r.build_reaction_from_string("a --> b", verbose=False)
r = Reaction("a2d")
U.add_reaction(r)
r.build_reaction_from_string("a --> d", verbose=False)
# GrowMatch
result = gapfilling.growMatch(m, U)[0]
assert len(result) == 1
assert result[0].id == "a2b"
# SMILEY
result = gapfilling.SMILEY(m, "b", U)[0]
assert len(result) == 1
assert result[0].id == "a2b"
# 2 rounds of GrowMatch with exchange reactions
result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
assert len(result) == 2
assert len(result[0]) == 1
assert len(result[1]) == 1
assert {i[0].id for i in result} == {"SMILEY_EX_b", "SMILEY_EX_c"}
@pytest.mark.skipif(numpy is None, reason="phase plane require numpy")
def test_phenotype_phase_plane_benchmark(self, model, benchmark):
benchmark(calculate_phenotype_phase_plane,
model, "EX_glc__D_e", "EX_o2_e",
reaction1_npoints=20, reaction2_npoints=20)
@pytest.mark.skipif(numpy is None, reason="phase plane require numpy")
def test_phenotype_phase_plane(self, model):
data = calculate_phenotype_phase_plane(
model, "EX_glc__D_e", "EX_o2_e",
reaction1_npoints=20, reaction2_npoints=20)
assert data.growth_rates.shape == (20, 20)
assert abs(data.growth_rates.max() - 1.20898) < 0.0001
assert abs(data.growth_rates[0, :].max()) < 0.0001
if matplotlib is None:
pytest.skip("can't test plots without matplotlib")
data.plot()
def check_entries(self, out, desired_entries):
"""ensure each entry in desired_entries appears in output"""
output = out.getvalue().strip()
output_set = set((re.sub('\s', '', l) for l in output.splitlines()))
for item in desired_entries:
assert re.sub('\s', '', item) in output_set
@pytest.mark.skipif((pandas is None) or (tabulate is None),
reason="summary methods require pandas and tabulate")
def test_summary_methods(self, model, solved_model):
# Test model summary methods
with pytest.raises(Exception):
model.summary()
desired_entries = [
'idFluxRangeidFluxRangeBiomass_Ecol...0.874',
'o2_e 21.8 [19.9, 23.7]'
'h2o_e 29.2 [25, 30.7]',
'glc__D_e 10 [9.52, 10]'
'co2_e 22.8 [18.9, 24.7]',
'nh4_e 4.77 [4.53, 5.16]'
'h_e 17.5 [16.7, 22.4]',
'pi_e 3.21 [3.05, 3.21]'
'for_e 0 [0, 5.72]',
'ac_e 0 [0, 1.91]',
'pyr_e 0 [0, 1.27]',
'lac__D_e 0 [0, 1.07]',
'succ_e 0 [0, 0.837]',
'glu__L_e 0 [0, 0.636]',
'akg_e 0 [0, 0.715]',
'etoh_e 0 [0, 1.11]',
'acald_e 0 [0, 1.27]',
]
for solver in solver_dict:
with captured_output() as (out, err):
solved_model.summary(fva=0.95, solver=solver)
self.check_entries(out, desired_entries)
        # test non-fva version (these values should be fixed for the textbook model)
desired_entries = [
'o2_e 21.8',
'glc__D_e 10',
'nh4_e 4.77',
'pi_e 3.21',
'h2o_e 29.2',
'co2_e 22.8',
'h_e 17.5',
'Biomass_Ecol... 0.874',
]
# Need to use a different method here because
# there are multiple entries per line.
for solver in solver_dict:
with captured_output() as (out, err):
solved_model.summary()
s = out.getvalue()
for i in desired_entries:
assert i in s
# Test metabolite summary methods
desired_entries = [
'PRODUCING REACTIONS -- Ubiquinone-8 (q8_c)',
'% FLUX RXN ID REACTION',
'100% 43.6 CYTBD '
'2.0 h_c + 0.5 o2_c + q8h2_c --> h2o_c + 2.0 h_e...',
'CONSUMING REACTIONS -- Ubiquinone-8 (q8_c)',
'% FLUX RXN ID REACTION',
'88% 38.5 NADH16 '
'4.0 h_c + nadh_c + q8_c --> 3.0 h_e + nad_c + q...',
'12% 5.06 SUCDi q8_c + succ_c --> fum_c + q8h2_c',
]
for solver in solver_dict:
with captured_output() as (out, err):
solved_model.metabolites.q8_c.summary()
self.check_entries(out, desired_entries)
desired_entries = [
'PRODUCING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
'----------------------------------------------------------',
'% FLUX RANGE RXN ID REACTION',
'100% 7.48 [6.17, 9.26] PFK '
'atp_c + f6p_c --> adp_c + fdp_c + h_c',
'CONSUMING REACTIONS -- D-Fructose 1,6-bisphosphate (fdp_c)',
'----------------------------------------------------------',
'% FLUX RANGE RXN ID REACTION',
'100% 7.48 [6.17, 8.92] FBA fdp_c <=> dhap_c + g3p_c',
'0% 0 [0, 1.72] FBP '
'fdp_c + h2o_c --> f6p_c + pi_c',
]
for solver in solver_dict:
with captured_output() as (out, err):
solved_model.metabolites.fdp_c.summary(fva=0.99, solver=solver)
self.check_entries(out, desired_entries)
| lgpl-2.1 |
cactusbin/nyt | matplotlib/lib/mpl_toolkits/mplot3d/axis3d.py | 6 | 16960 | #!/usr/bin/python
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <[email protected]>
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
import art3d
import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
if coord[index] == mins[index]:
return maxs[index]
else:
return mins[index]
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
self._axinfo.update({'label' : {'space_factor': 1.6,
'va': 'center',
'ha': 'center'},
'tick' : {'inward_factor': 0.2,
'outward_factor': 0.1},
'ticklabel': {'space_factor': 0.7},
'axisline': {'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' : {'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
def set_pane_color(self, color):
        '''Set pane color to an RGBA tuple.'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
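        # deltas (~1/12 of each axis span) doubles as the padding added around the
        # data limits below and as the offset unit for labels and ticks elsewhere.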
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
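        # Compare the average projected depth of the two candidate planes for each
        # axis; 'highs' records which side of the box gets the pane and grid lines
        # for the current viewing angle.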
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform(peparray[0:2, 1]) -
self.axes.transAxes.transform(peparray[0:2, 0]))
lxyz = 0.5*(edgep1 + edgep2)
labeldeltas = info['label']['space_factor'] * deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
        # Note: the following logic determines the proper alignment of the
        # offset text.  It was arrived at entirely by trial-and-error and
        # should not in any way be considered "the way".  There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
        # Three-letter codes (e.g., TFT, FTT) are shorthand for the array
        # of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
                # Usually, this means align right, except for the FTT case,
                # in which the offsets for axes 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
                # Usually this means align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = zip(xyz1, xyz0, xyz2)
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
labeldeltas = [info['ticklabel']['space_factor'] * x for
x in deltas]
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
| unlicense |
reuk/waveguide | demo/evaluation/tunnel/spectrograms.py | 2 | 2202 | #!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from string import split
import scipy.signal as signal
import pysndfile
import math
import os
import re
import json
def get_specgram_data(fname):
sndfile = pysndfile.PySndfile(fname, 'r')
if sndfile.channels() != 1:
raise RuntimeError('please only load mono files')
Fs = sndfile.samplerate()
signal = sndfile.read_frames()
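    # 1024-sample FFT windows with 50% (512-sample) overlap; pxx is the power
    # spectral density, freq and time give the bin centres used for plotting.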
pxx, freq, time = mlab.specgram(signal, NFFT=1024, noverlap=512, Fs=Fs)
return pxx, freq, time
def main():
files = [
("scattering: 0", "tunnel_near_no_scatter.wav"),
("scattering: 0.1", "tunnel_near.wav"),
]
specgrams = [(label, get_specgram_data(fname)) for label, fname in files]
fig, axes = plt.subplots(nrows=len(specgrams), sharex=True)
cmap = plt.get_cmap('viridis')
Z = map(lambda (label, (pxx, freq, time)): 10 * np.log10(pxx), specgrams)
maxes = map(lambda z: np.nanmax(z), Z)
print maxes
vmin = -100
vmax = max(maxes)
for (label, (pxx, freq, time)), ax, z in zip(specgrams, axes, Z):
im = ax.pcolormesh(time, freq, z, cmap=cmap, vmin=vmin, vmax=vmax, rasterized=True)
ax.set_ylim(20, 20000)
ax.set_yscale('log')
ax.set_title(label)
ax.set_xlabel('time / s')
ax.set_ylabel('frequency / Hz')
ax.set_xlim(0, 4)
cb = fig.colorbar(im, ax=axes.ravel().tolist(), use_gridspec=True)
cb.set_label('dB')
plt.suptitle('Spectrograms of Tunnel Impulse Responses, With and Without Scattering')
plt.tight_layout()
plt.subplots_adjust(top=0.9)
plt.subplots_adjust(right=0.75)
plt.show()
if render:
plt.savefig('tunnel_spectrograms.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'font.monospace': ['Input Mono Condensed'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
| gpl-2.0 |
sarunya-w/CS402-PROJECT | Project/feature_extraction/feature_extraction_parallel/hogengine.py | 1 | 1833 | # -*- coding: utf-8 -*-
"""
Created on Fri May 01 02:10:31 2015
@author: Sarunya
"""
import sys
import numpy as np
from matplotlib import pyplot as plt
#from cv2 import HOGDescriptor
from skimage.feature import hog
sys.setrecursionlimit(10000)
clmax = 11  # clmax is the number of classes
def normHOG(images_file):
img = np.array(images_file)
    height, width = img.shape  # numpy image arrays are (rows, cols)
# SKIMAGE
f = hog(img, normalise=True,pixels_per_cell=(height//4, width//4))
print f.shape
# OPENCV HOGDescriptor(win_size,block_size,block_stride,cell_size,nbins
#opencv_hog = HOGDescriptor((200,200), (16,16), (8,8), (8,8), 9)
#f = opencv_hog.compute(img)
return f
def getValue(images):
f = normHOG(images)
return f.reshape(-1)
def getVector(images_files,class_files,samples,isTrain):
from PIL import Image
bs = 200
    clmax = 11  # clmax is the number of classes
sub_img = []
sub_cs = []
bb = bs//2
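    # bb is half the patch size: every sample is a bs x bs crop centred on a
    # random pixel chosen at least bb pixels away from the image border.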
pos = []
for f in xrange(len(images_files)):
img = Image.open(images_files[f]).convert('L')
w , h = img.size
pixels=[]
for i in xrange(samples):
r = np.random.randint(bb, h-bb)
c = np.random.randint(bb, w-bb)
pixels.append((c,r))
#if isTrain==False:
# pos.append((c,r))
box = (c-bb, r-bb, c + bb, r + bb)
output_img = img.crop(box)
sub_img.append(getValue(output_img))
if isTrain:
cimg = Image.open(class_files[f]).convert('L')
for p in pixels:
sub_cs.append(cimg.getpixel(p))
if isTrain:
sub_img=np.array(sub_img,dtype=np.float32)
sub_cs=np.array(sub_cs,dtype=np.uint32)
sub_cs[sub_cs==255]= clmax - 1
else:
sub_cs=None
return (sub_img ,sub_cs)
| mit |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/io/tests/test_data.py | 1 | 16121 | from __future__ import print_function
from pandas import compat
import warnings
import nose
from nose.tools import assert_equal
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.io import data as web
from pandas.io.data import DataReader, SymbolWarning
from pandas.util.testing import (assert_series_equal, assert_produces_warning,
network, assert_frame_equal)
import pandas.util.testing as tm
from numpy.testing import assert_array_equal
if compat.PY3:
from urllib.error import HTTPError
else:
from urllib2 import HTTPError
def _skip_if_no_lxml():
try:
import lxml
except ImportError:
raise nose.SkipTest("no lxml")
def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning):
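    # Sanity helper: every symbol whose column came back all-NaN should have
    # produced exactly one warning (of class cls) naming it.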
all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in
compat.iteritems(obj)))
n_all_nan_cols = all_nan_cols.sum()
valid_warnings = pd.Series([wng for wng in wngs if isinstance(wng, cls)])
assert_equal(len(valid_warnings), n_all_nan_cols)
failed_symbols = all_nan_cols[all_nan_cols].index
msgs = valid_warnings.map(lambda x: x.message)
assert msgs.str.contains('|'.join(failed_symbols)).all()
class TestGoogle(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestGoogle, cls).setUpClass()
cls.locales = tm.get_locales(prefix='en_US')
if not cls.locales:
raise nose.SkipTest("US English locale not available for testing")
@classmethod
def tearDownClass(cls):
super(TestGoogle, cls).tearDownClass()
del cls.locales
@network
def test_google(self):
# asserts that google is minimally working and that it throws
# an exception when DataReader can't get a 200 response from
# google
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
for locale in self.locales:
with tm.set_locale(locale):
panel = web.DataReader("F", 'google', start, end)
self.assertEquals(panel.Close[-1], 13.68)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER",
'google', start, end)
@network
def test_get_quote_fails(self):
self.assertRaises(NotImplementedError, web.get_quote_google,
pd.Series(['GOOG', 'AAPL', 'GOOG']))
@network
def test_get_goog_volume(self):
for locale in self.locales:
with tm.set_locale(locale):
df = web.get_data_google('GOOG').sort_index()
self.assertEqual(df.Volume.ix['OCT-08-2010'], 2863473)
@network
def test_get_multi1(self):
for locale in self.locales:
sl = ['AAPL', 'AMZN', 'GOOG']
with tm.set_locale(locale):
pan = web.get_data_google(sl, '2012')
ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
if (hasattr(pan, 'Close') and hasattr(pan.Close, 'GOOG') and
hasattr(pan.Close, 'AAPL')):
self.assertEquals(ts[0].dayofyear, 96)
else:
self.assertRaises(AttributeError, lambda: pan.Close)
@network
def test_get_multi2(self):
with warnings.catch_warnings(record=True) as w:
for locale in self.locales:
with tm.set_locale(locale):
pan = web.get_data_google(['GE', 'MSFT', 'INTC'],
'JAN-01-12', 'JAN-31-12')
result = pan.Close.ix['01-18-12']
assert_n_failed_equals_n_null_columns(w, result)
# sanity checking
assert np.issubdtype(result.dtype, np.floating)
result = pan.Open.ix['Jan-15-12':'Jan-20-12']
self.assertEqual((4, 3), result.shape)
assert_n_failed_equals_n_null_columns(w, result)
class TestYahoo(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestYahoo, cls).setUpClass()
_skip_if_no_lxml()
@network
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
# an exception when DataReader can't get a 200 response from
# yahoo
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
self.assertEquals( web.DataReader("F", 'yahoo', start,
end)['Close'][-1], 13.68)
@network
def test_yahoo_fails(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER",
'yahoo', start, end)
@network
def test_get_quote_series(self):
df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG']))
assert_series_equal(df.ix[0], df.ix[2])
@network
def test_get_quote_string(self):
df = web.get_quote_yahoo('GOOG')
@network
def test_get_quote_stringlist(self):
df = web.get_quote_yahoo(['GOOG', 'AAPL', 'GOOG'])
assert_series_equal(df.ix[0], df.ix[2])
@network
def test_get_components_dow_jones(self):
raise nose.SkipTest('unreliable test, receive partial components back for dow_jones')
df = web.get_components_yahoo('^DJI') #Dow Jones
assert isinstance(df, pd.DataFrame)
self.assertEqual(len(df), 30)
@network
def test_get_components_dax(self):
raise nose.SkipTest('unreliable test, receive partial components back for dax')
df = web.get_components_yahoo('^GDAXI') #DAX
assert isinstance(df, pd.DataFrame)
self.assertEqual(len(df), 30)
self.assertEqual(df[df.name.str.contains('adidas', case=False)].index,
'ADS.DE')
@network
def test_get_components_nasdaq_100(self):
# as of 7/12/13 the conditional will test false because the link is invalid
raise nose.SkipTest('unreliable test, receive partial components back for nasdaq_100')
df = web.get_components_yahoo('^NDX') #NASDAQ-100
assert isinstance(df, pd.DataFrame)
if len(df) > 1:
# Usual culprits, should be around for a while
assert 'AAPL' in df.index
assert 'GOOG' in df.index
assert 'AMZN' in df.index
else:
expected = DataFrame({'exchange': 'N/A', 'name': '@^NDX'},
index=['@^NDX'])
assert_frame_equal(df, expected)
@network
def test_get_data_single_symbol(self):
#single symbol
#http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d
df = web.get_data_yahoo('GOOG')
self.assertEqual(df.Volume.ix['OCT-08-2010'], 2859200)
@network
def test_get_data_multiple_symbols(self):
sl = ['AAPL', 'AMZN', 'GOOG']
pan = web.get_data_yahoo(sl, '2012')
def testit():
ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG]
self.assertEquals(ts[0].dayofyear, 96)
if hasattr(pan.Close, 'GOOG') and hasattr(pan.Close, 'AAPL'):
testit()
else:
self.assertRaises(AttributeError, testit)
@network
def test_get_data_multiple_symbols_two_dates(self):
pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12', 'JAN-31-12')
result = pan.Close.ix['01-18-12']
self.assertEqual(len(result), 3)
# sanity checking
assert np.issubdtype(result.dtype, np.floating)
expected = np.array([[ 18.99, 28.4 , 25.18],
[ 18.58, 28.31, 25.13],
[ 19.03, 28.16, 25.52],
[ 18.81, 28.82, 25.87]])
result = pan.Open.ix['Jan-15-12':'Jan-20-12']
self.assertEqual(expected.shape, result.shape)
@network
def test_get_date_ret_index(self):
pan = web.get_data_yahoo(['GE', 'INTC', 'IBM'], '1977', '1987',
ret_index=True)
self.assert_(hasattr(pan, 'Ret_Index'))
if hasattr(pan, 'Ret_Index') and hasattr(pan.Ret_Index, 'INTC'):
tstamp = pan.Ret_Index.INTC.first_valid_index()
result = pan.Ret_Index.ix[tstamp]['INTC']
self.assertEqual(result, 1.0)
# sanity checking
assert np.issubdtype(pan.values.dtype, np.floating)
class TestYahooOptions(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestYahooOptions, cls).setUpClass()
_skip_if_no_lxml()
# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
today = datetime.today()
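        # Use the first day of the next month so the requested option expiry is
        # always in the future relative to 'today'.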
year = today.year
month = today.month + 1
if month > 12:
year = year + 1
month = 1
cls.expiry = datetime(year, month, 1)
@classmethod
def tearDownClass(cls):
super(TestYahooOptions, cls).tearDownClass()
del cls.aapl, cls.expiry
@network
def test_get_options_data(self):
try:
calls, puts = self.aapl.get_options_data(expiry=self.expiry)
except IndexError:
warnings.warn("IndexError thrown no tables found")
else:
assert len(calls)>1
assert len(puts)>1
    def test_get_options_data_invalid_month_year(self):
# regression test GH6105
self.assertRaises(ValueError,self.aapl.get_options_data,month=3)
self.assertRaises(ValueError,self.aapl.get_options_data,year=1992)
@network
def test_get_near_stock_price(self):
try:
calls, puts = self.aapl.get_near_stock_price(call=True, put=True,
expiry=self.expiry)
except IndexError:
warnings.warn("IndexError thrown no tables found")
else:
self.assertEqual(len(calls), 5)
self.assertEqual(len(puts), 5)
@network
def test_get_call_data(self):
try:
calls = self.aapl.get_call_data(expiry=self.expiry)
except IndexError:
warnings.warn("IndexError thrown no tables found")
else:
assert len(calls)>1
@network
def test_get_put_data(self):
try:
puts = self.aapl.get_put_data(expiry=self.expiry)
except IndexError:
warnings.warn("IndexError thrown no tables found")
else:
assert len(puts)>1
class TestOptionsWarnings(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestOptionsWarnings, cls).setUpClass()
_skip_if_no_lxml()
with assert_produces_warning(FutureWarning):
cls.aapl = web.Options('aapl')
today = datetime.today()
cls.year = today.year
cls.month = today.month + 1
if cls.month > 12:
cls.year += 1
cls.month = 1
@classmethod
def tearDownClass(cls):
super(TestOptionsWarnings, cls).tearDownClass()
del cls.aapl, cls.year, cls.month
@network
def test_get_options_data_warning(self):
with assert_produces_warning():
print('month: {0}, year: {1}'.format(self.month, self.year))
try:
self.aapl.get_options_data(month=self.month, year=self.year)
except IndexError:
warnings.warn("IndexError thrown no tables found")
@network
def test_get_near_stock_price_warning(self):
with assert_produces_warning():
print('month: {0}, year: {1}'.format(self.month, self.year))
try:
calls_near, puts_near = self.aapl.get_near_stock_price(call=True,
put=True,
month=self.month,
year=self.year)
except IndexError:
warnings.warn("IndexError thrown no tables found")
@network
def test_get_call_data_warning(self):
with assert_produces_warning():
print('month: {0}, year: {1}'.format(self.month, self.year))
try:
self.aapl.get_call_data(month=self.month, year=self.year)
except IndexError:
warnings.warn("IndexError thrown no tables found")
@network
def test_get_put_data_warning(self):
with assert_produces_warning():
print('month: {0}, year: {1}'.format(self.month, self.year))
try:
self.aapl.get_put_data(month=self.month, year=self.year)
except IndexError:
warnings.warn("IndexError thrown no tables found")
class TestDataReader(tm.TestCase):
def test_is_s3_url(self):
from pandas.io.common import _is_s3_url
self.assert_(_is_s3_url("s3://pandas/somethingelse.com"))
@network
def test_read_yahoo(self):
gs = DataReader("GS", "yahoo")
assert isinstance(gs, DataFrame)
@network
def test_read_google(self):
gs = DataReader("GS", "google")
assert isinstance(gs, DataFrame)
@network
def test_read_fred(self):
vix = DataReader("VIXCLS", "fred")
assert isinstance(vix, DataFrame)
@network
def test_read_famafrench(self):
for name in ("F-F_Research_Data_Factors",
"F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3",
"F-F_ST_Reversal_Factor"):
ff = DataReader(name, "famafrench")
assert ff
assert isinstance(ff, dict)
class TestFred(tm.TestCase):
@network
def test_fred(self):
# Throws an exception when DataReader can't get a 200 response from
# FRED.
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
received = web.DataReader("GDP", "fred", start, end)['GDP'].tail(1)[0]
self.assertEquals(int(received), 16535)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT SERIES",
'fred', start, end)
@network
def test_fred_nan(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
df = web.DataReader("DFII5", "fred", start, end)
assert pd.isnull(df.ix['2010-01-01'][0])
@network
def test_fred_parts(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
df = web.get_data_fred("CPIAUCSL", start, end)
self.assertEqual(df.ix['2010-05-01'][0], 217.23)
t = df.CPIAUCSL.values
assert np.issubdtype(t.dtype, np.floating)
self.assertEqual(t.shape, (37,))
@network
def test_fred_part2(self):
expected = [[576.7],
[962.9],
[684.7],
[848.3],
[933.3]]
result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5]
assert_array_equal(result.values, np.array(expected))
@network
def test_invalid_series(self):
name = "NOT A REAL SERIES"
self.assertRaises(Exception, web.get_data_fred, name)
@network
def test_fred_multi(self):
names = ['CPIAUCSL', 'CPALTT01USQ661S', 'CPILFESL']
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
received = web.DataReader(names, "fred", start, end).head(1)
expected = DataFrame([[217.478, 0.99701529, 220.544]], columns=names,
index=[pd.tslib.Timestamp('2010-01-01 00:00:00')])
expected.index.rename('DATE', inplace=True)
assert_frame_equal(received, expected, check_less_precise=True)
@network
def test_fred_multi_bad_series(self):
names = ['NOTAREALSERIES', 'CPIAUCSL', "ALSO FAKE"]
with tm.assertRaises(HTTPError):
DataReader(names, data_source="fred")
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
openelections/openelections-data-ca | src/parse_general_2014.py | 2 | 3793 | import pandas as pd
import re
from swdb.util import COUNTIES
url_prefix = 'http://elections.cdn.sos.ca.gov/sov/2014-general/xls/'
state_level_files = [
('19-governor.xls', 'Governor'),
('22-lieutenant-governor.xls', 'Lieutenant Governor'),
('25-secretary-of-state.xls', 'Secretary of State'),
('28-controller.xls', 'Controller'),
('31-treasurer.xls', 'Treasurer'),
('34-attorney-general.xls', 'Attorney General'),
('37-insurance-commissioner.xls', 'Insurance Commissioner'),
('85-superintendent-of-public-instruction.xls',
'Superintendent of Public Instruction')
]
district_files = [
('40-board-of-equalization.xls', 'Board of Equalization'),
('43-congress.xls', 'U.S. House'),
('58-state-senator.xls', 'State Senate'),
('64-state-assemblymember.xls', 'State Assembly'),
]
proposition_file = '88-ballot-measures.xls'
fieldnames = ["county", "office", "district", "party", "candidate", "votes"]
def parse_candidate(value):
return ' '.join(value.strip("*").replace('\n', ' ').split())
def parse(fname, office, district):
df = pd.read_excel(
url_prefix + fname).dropna().reset_index().rename(columns={'index': 'county'}).rename(columns=parse_candidate)
parties = df[[1, 2]].iloc[0].to_dict()
df = df[df.county.isin(COUNTIES)]
df = pd.melt(df, id_vars='county', value_vars=df.columns.tolist()[
1:], var_name='candidate', value_name='votes')
df['party'] = df.candidate.apply(lambda x: parties[x])
df = df.assign(office=office, district=district)
return df[fieldnames]
def parse_sub(sub, office, district):
sub.columns = ['county'] + sub[[1, 2]].iloc[0].tolist() + ['office']
sub = sub.dropna(axis=1, how='all')
sub = sub.rename(columns=parse_candidate)
parties = sub[[1, 2]].iloc[1].to_dict()
sub = sub[sub.county.isin(COUNTIES)]
sub = pd.melt(sub, id_vars=['county', 'office'], value_vars=sub.columns.tolist()[
1:-1], var_name='candidate', value_name='votes')
sub['party'] = sub.candidate.apply(lambda x: parties[x])
sub = sub.assign(office=office, district=district)
return sub[fieldnames]
def parse_district(fname, office):
df = pd.read_excel(
url_prefix + fname, header=None, names=['county', 'cand1', 'cand2'])
    # Rows whose 'county' cell ends in 'District' are section headers; keep that
    # header text in 'office' (NaN elsewhere) so it can be forward-filled over
    # the candidate rows below.
    df['office'] = df.county[df.county.str.endswith('District', na=False)]
df.office = df.office.fillna(method='pad')
return pd.concat([parse_sub(df[df.office == district][df.cand1.notnull()], office, re.sub("[^\d]+", "", district)) for district in df.office.unique()])
def parse_propositions(fname):
df = pd.read_excel(url_prefix + fname,
header=None).fillna(axis=1, method='pad')
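    # Padding across columns lets each Yes/No column inherit the proposition
    # title from the header cell to its left (apparently merged/blank in the
    # source sheet).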
df.columns = df.iloc[0].fillna(
'county') + ',' + df.iloc[3].fillna('').str.lstrip(' ')
df = df.rename(columns={'county,': 'county'})
df = df[df.county.isin(COUNTIES)]
df = pd.melt(df, id_vars=['county'], value_vars=df.columns.tolist()[
1:], var_name='cand_office', value_name='votes')
oc = df['cand_office'].apply(lambda x: pd.Series(x.split(',')))
df['office'] = oc[0]
df['candidate'] = oc[1]
df = df.assign(party='', district='')
return df[fieldnames]
def main():
df = pd.concat([parse(fname, office, '')
for fname, office in state_level_files] +
[parse_district(fname, office)
for fname, office in district_files] +
[parse_propositions(proposition_file)])
for x in ['candidate', 'district', 'office', 'county']:
df = df.sort_values(by=x, kind='mergesort')
df.to_csv('2014/20141104__ca__general.csv', index=False)
if __name__ == "__main__":
main()
| mit |
jstoxrocky/statsmodels | statsmodels/sandbox/examples/try_smoothers.py | 39 | 2655 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 01 15:17:52 2011
Author: Mike
Author: Josef
mainly script for checking Kernel Regression
"""
import numpy as np
if __name__ == "__main__":
#from statsmodels.sandbox.nonparametric import smoothers as s
from statsmodels.sandbox.nonparametric import smoothers, kernels
import matplotlib.pyplot as plt
#from numpy import sin, array, random
import time
np.random.seed(500)
nobs = 250
sig_fac = 0.5
#x = np.random.normal(size=nobs)
x = np.random.uniform(-2, 2, size=nobs)
#y = np.array([np.sin(i*5)/i + 2*i + (3+i)*np.random.normal() for i in x])
y = np.sin(x*5)/x + 2*x + sig_fac * (3+x)*np.random.normal(size=nobs)
K = kernels.Biweight(0.25)
K2 = kernels.CustomKernel(lambda x: (1 - x*x)**2, 0.25, domain = [-1.0,
1.0])
KS = smoothers.KernelSmoother(x, y, K)
KS2 = smoothers.KernelSmoother(x, y, K2)
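    # K2 is the biweight kernel written out explicitly, so KS and KS2 should give
    # (up to numerical noise) the same norm constants, fits and variances below.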
KSx = np.arange(-3, 3, 0.1)
start = time.time()
KSy = KS.conf(KSx)
KVar = KS.std(KSx)
print(time.time() - start) # This should be significantly quicker...
start = time.time() #
KS2y = KS2.conf(KSx) #
K2Var = KS2.std(KSx) #
print(time.time() - start) # ...than this.
KSConfIntx, KSConfInty = KS.conf(15)
print("Norm const should be 0.9375")
print(K2.norm_const)
print("L2 Norms Should Match:")
print(K.L2Norm)
print(K2.L2Norm)
print("Fit values should match:")
#print zip(KSy, KS2y)
print(KSy[28])
print(KS2y[28])
print("Var values should match:")
#print zip(KVar, K2Var)
print(KVar[39])
print(K2Var[39])
fig = plt.figure()
ax = fig.add_subplot(221)
ax.plot(x, y, "+")
ax.plot(KSx, KSy, "-o")
#ax.set_ylim(-20, 30)
ax2 = fig.add_subplot(222)
ax2.plot(KSx, KVar, "-o")
ax3 = fig.add_subplot(223)
ax3.plot(x, y, "+")
ax3.plot(KSx, KS2y, "-o")
#ax3.set_ylim(-20, 30)
ax4 = fig.add_subplot(224)
ax4.plot(KSx, K2Var, "-o")
fig2 = plt.figure()
ax5 = fig2.add_subplot(111)
ax5.plot(x, y, "+")
ax5.plot(KSConfIntx, KSConfInty, "-o")
import statsmodels.nonparametric.smoothers_lowess as lo
ys = lo.lowess(y, x)
ax5.plot(ys[:,0], ys[:,1], 'b-')
ys2 = lo.lowess(y, x, frac=0.25)
ax5.plot(ys2[:,0], ys2[:,1], 'b--', lw=2)
    # need to sort for matplotlib plot?
xind = np.argsort(x)
pmod = smoothers.PolySmoother(5, x[xind])
pmod.fit(y[xind])
yp = pmod(x[xind])
ax5.plot(x[xind], yp, 'k-')
ax5.set_title('Kernel regression, lowess - blue, polysmooth - black')
#plt.show()
| bsd-3-clause |
zhangpf/vbox | src/VBox/ValidationKit/testmanager/webui/wuimain.py | 4 | 50437 | # -*- coding: utf-8 -*-
# $Id$
"""
Test Manager Core - WUI - The Main page.
"""
__copyright__ = \
"""
Copyright (C) 2012-2014 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision$"
# Standard Python imports.
# Validation Kit imports.
from testmanager import config;
from testmanager.webui.wuibase import WuiDispatcherBase, WuiException;
from testmanager.webui.wuicontentbase import WuiTmLink;
from testmanager.core.report import ReportLazyModel, ReportGraphModel, ReportModelBase;
from testmanager.core.testresults import TestResultLogic, TestResultFileDataEx;
from testmanager.core.base import TMExceptionBase, TMTooManyRows;
from testmanager.core.testset import TestSetData, TestSetLogic;
from testmanager.core.build import BuildDataEx;
from testmanager.core.testbox import TestBoxData
from testmanager.core.testgroup import TestGroupData;
from testmanager.core.testcase import TestCaseDataEx
from testmanager.core.testcaseargs import TestCaseArgsDataEx
from testmanager.core.vcsrevisions import VcsRevisionLogic;
from common import webutils, utils;
class WuiMain(WuiDispatcherBase):
"""
WUI Main page.
    Note! All cyclic dependency avoidance stuff goes here in the dispatcher code,
    not in the action specific code. This keeps the ugliness in one place
and reduces load time dependencies in the more critical code path.
"""
## The name of the script.
ksScriptName = 'index.py'
## @name Actions
## @{
ksActionResultsUnGrouped = 'ResultsUnGrouped'
ksActionResultsGroupedBySchedGroup = 'ResultsGroupedBySchedGroup'
ksActionResultsGroupedByTestGroup = 'ResultsGroupedByTestGroup'
ksActionResultsGroupedByBuildRev = 'ResultsGroupedByBuildRev'
ksActionResultsGroupedByTestBox = 'ResultsGroupedByTestBox'
ksActionResultsGroupedByTestCase = 'ResultsGroupedByTestCase'
ksActionTestResultDetails = 'TestResultDetails'
ksActionViewLog = 'ViewLog'
ksActionGetFile = 'GetFile'
ksActionReportSummary = 'ReportSummary';
ksActionReportRate = 'ReportRate';
ksActionReportFailureReasons = 'ReportFailureReasons';
ksActionGraphWiz = 'GraphWiz';
ksActionVcsHistoryTooltip = 'VcsHistoryTooltip';
## @}
## @name Standard report parameters
## @{
ksParamReportPeriods = 'cPeriods';
ksParamReportPeriodInHours = 'cHoursPerPeriod';
ksParamReportSubject = 'sSubject';
ksParamReportSubjectIds = 'SubjectIds';
## @}
## @name Graph Wizard parameters
## Common parameters: ksParamReportPeriods, ksParamReportPeriodInHours, ksParamReportSubjectIds,
## ksParamReportSubject, ksParamEffectivePeriod, and ksParamEffectiveDate.
## @{
ksParamGraphWizTestBoxIds = 'aidTestBoxes';
ksParamGraphWizBuildCatIds = 'aidBuildCats';
ksParamGraphWizTestCaseIds = 'aidTestCases';
ksParamGraphWizSepTestVars = 'fSepTestVars';
ksParamGraphWizImpl = 'enmImpl';
ksParamGraphWizWidth = 'cx';
ksParamGraphWizHeight = 'cy';
ksParamGraphWizDpi = 'dpi';
ksParamGraphWizFontSize = 'cPtFont';
ksParamGraphWizErrorBarY = 'fErrorBarY';
ksParamGraphWizMaxErrorBarY = 'cMaxErrorBarY';
ksParamGraphWizMaxPerGraph = 'cMaxPerGraph';
ksParamGraphWizXkcdStyle = 'fXkcdStyle';
ksParamGraphWizTabular = 'fTabular';
ksParamGraphWizSrcTestSetId = 'idSrcTestSet';
## @}
## @name Graph implementations values for ksParamGraphWizImpl.
## @{
ksGraphWizImpl_Default = 'default';
ksGraphWizImpl_Matplotlib = 'matplotlib';
ksGraphWizImpl_Charts = 'charts';
kasGraphWizImplValid = [ ksGraphWizImpl_Default, ksGraphWizImpl_Matplotlib, ksGraphWizImpl_Charts];
kaasGraphWizImplCombo = [
( ksGraphWizImpl_Default, 'Default' ),
( ksGraphWizImpl_Matplotlib, 'Matplotlib (server)' ),
( ksGraphWizImpl_Charts, 'Google Charts (client)'),
];
## @}
## @name Log Viewer parameters.
## @{
ksParamLogSetId = 'LogViewer_idTestSet';
ksParamLogFileId = 'LogViewer_idFile';
ksParamLogChunkSize = 'LogViewer_cbChunk';
ksParamLogChunkNo = 'LogViewer_iChunk';
## @}
## @name File getter parameters.
## @{
ksParamGetFileSetId = 'GetFile_idTestSet';
ksParamGetFileId = 'GetFile_idFile';
ksParamGetFileDownloadIt = 'GetFile_fDownloadIt';
## @}
## @name VCS history parameters.
## @{
ksParamVcsHistoryRepository = 'repo';
ksParamVcsHistoryRevision = 'rev';
ksParamVcsHistoryEntries = 'cEntries';
## @}
## Effective time period. one of the first column values in kaoResultPeriods.
ksParamEffectivePeriod = 'sEffectivePeriod'
## If this param is specified, then show only results for this member when results grouped by some parameter.
ksParamGroupMemberId = 'GroupMemberId'
## Optional parameter for indicating whether to restrict the listing to failures only.
ksParamOnlyFailures = 'OnlyFailures'
## Test result period values.
kaoResultPeriods = [
( '1 hour', 'One hour', 1 ),
( '2 hours', 'Two hours', 2 ),
( '3 hours', 'Three hours', 3 ),
( '6 hours', 'Six hours', 6 ),
( '12 hours', '12 hours', 12 ),
( '1 day', 'One day', 24 ),
( '2 days', 'Two days', 48 ),
( '3 days', 'Three days', 72 ),
( '1 week', 'One week', 168 ),
( '2 weeks', 'Two weeks', 336 ),
( '3 weeks', 'Three weeks', 504 ),
( '1 month', 'One month', 31 * 24 ), # The approx hour count varies with the start date.
( '2 months', 'Two month', (31 + 31) * 24 ), # Using maximum values.
( '3 months', 'Three month', (31 + 30 + 31) * 24 ),
( '6 months', 'Six month', (31 + 31 + 30 + 31 + 30 + 31) * 24 ),
( '1 year', 'One year', 365 * 24 ),
];
## The default test result period.
ksResultPeriodDefault = '3 hours';
def __init__(self, oSrvGlue):
WuiDispatcherBase.__init__(self, oSrvGlue, self.ksScriptName);
self._sTemplate = 'template.html'
#
# Populate the action dispatcher dictionary.
#
# Use short form to avoid hitting the right margin (130) when using lambda.
d = self._dDispatch; # pylint: disable=C0103
from testmanager.webui.wuitestresult import WuiGroupedResultList;
#d[self.ksActionResultsUnGrouped] = lambda: self._actionResultsListing(TestResultLogic, WuiGroupedResultList)
d[self.ksActionResultsUnGrouped] = lambda: self._actionGroupedResultsListing(
TestResultLogic.ksResultsGroupingTypeNone,
TestResultLogic,
WuiGroupedResultList)
d[self.ksActionResultsGroupedByTestGroup] = lambda: self._actionGroupedResultsListing(
TestResultLogic.ksResultsGroupingTypeTestGroup,
TestResultLogic,
WuiGroupedResultList)
d[self.ksActionResultsGroupedByBuildRev] = lambda: self._actionGroupedResultsListing(
TestResultLogic.ksResultsGroupingTypeBuildRev,
TestResultLogic,
WuiGroupedResultList)
d[self.ksActionResultsGroupedByTestBox] = lambda: self._actionGroupedResultsListing(
TestResultLogic.ksResultsGroupingTypeTestBox,
TestResultLogic,
WuiGroupedResultList)
d[self.ksActionResultsGroupedByTestCase] = lambda: self._actionGroupedResultsListing(
TestResultLogic.ksResultsGroupingTypeTestCase,
TestResultLogic,
WuiGroupedResultList)
d[self.ksActionResultsGroupedBySchedGroup] = lambda: self._actionGroupedResultsListing(
TestResultLogic.ksResultsGroupingTypeSchedGroup,
TestResultLogic,
WuiGroupedResultList)
d[self.ksActionTestResultDetails] = self.actionTestResultDetails
d[self.ksActionViewLog] = self.actionViewLog;
d[self.ksActionGetFile] = self.actionGetFile;
from testmanager.webui.wuireport import WuiReportSummary, WuiReportSuccessRate, WuiReportFailureReasons;
d[self.ksActionReportSummary] = lambda: self._actionGenericReport(ReportLazyModel, WuiReportSummary);
d[self.ksActionReportRate] = lambda: self._actionGenericReport(ReportLazyModel, WuiReportSuccessRate);
d[self.ksActionReportFailureReasons] = lambda: self._actionGenericReport(ReportLazyModel, WuiReportFailureReasons);
d[self.ksActionGraphWiz] = self._actionGraphWiz;
d[self.ksActionVcsHistoryTooltip] = self._actionVcsHistoryTooltip;
#
        # Populate the menus.
#
        # Additional URL parameters to keep for time navigation.
sExtraTimeNav = ''
dCurParams = oSrvGlue.getParameters()
if dCurParams is not None:
asActionUrlExtras = [ self.ksParamItemsPerPage, self.ksParamEffectiveDate, self.ksParamEffectivePeriod, ];
for sExtraParam in asActionUrlExtras:
if sExtraParam in dCurParams:
sExtraTimeNav += '&%s' % webutils.encodeUrlParams({sExtraParam: dCurParams[sExtraParam]})
# Shorthand to keep within margins.
sActUrlBase = self._sActionUrlBase;
self._aaoMenus = \
[
[
                'Inbox', sActUrlBase + 'TODO', ## @todo list of failures that need categorizing.
[]
],
[
'Reports', sActUrlBase + self.ksActionReportSummary,
[
[ 'Summary', sActUrlBase + self.ksActionReportSummary ],
[ 'Success Rate', sActUrlBase + self.ksActionReportRate ],
[ 'Failure Reasons', sActUrlBase + self.ksActionReportFailureReasons ],
]
],
[
'Test Results', sActUrlBase + self.ksActionResultsUnGrouped + sExtraTimeNav,
[
[ 'Ungrouped results', sActUrlBase + self.ksActionResultsUnGrouped + sExtraTimeNav ],
[ 'Grouped by Scheduling Group', sActUrlBase + self.ksActionResultsGroupedBySchedGroup + sExtraTimeNav ],
[ 'Grouped by Test Group', sActUrlBase + self.ksActionResultsGroupedByTestGroup + sExtraTimeNav ],
[ 'Grouped by TestBox', sActUrlBase + self.ksActionResultsGroupedByTestBox + sExtraTimeNav ],
[ 'Grouped by Test Case', sActUrlBase + self.ksActionResultsGroupedByTestCase + sExtraTimeNav ],
[ 'Grouped by Revision', sActUrlBase + self.ksActionResultsGroupedByBuildRev + sExtraTimeNav ],
]
],
[
'> Admin', 'admin.py?' + webutils.encodeUrlParams(self._dDbgParams), []
],
];
def _actionDefault(self):
"""Show the default admin page."""
from testmanager.webui.wuitestresult import WuiGroupedResultList;
self._sAction = self.ksActionResultsUnGrouped
return self._actionGroupedResultsListing(TestResultLogic.ksResultsGroupingTypeNone,
TestResultLogic,
WuiGroupedResultList)
#
# Navigation bar stuff
#
def _generateStatusSelector(self, dParams, fOnlyFailures):
"""
Generate HTML code for the status code selector. Currently very simple.
"""
dParams[self.ksParamOnlyFailures] = not fOnlyFailures;
return WuiTmLink('Show all results' if fOnlyFailures else 'Only show failed tests', '', dParams,
fBracketed = False).toHtml();
def _generateTimeSelector(self, dParams, sPreamble, sPostamble):
"""
Generate HTML code for time selector.
"""
if WuiDispatcherBase.ksParamEffectiveDate in dParams:
tsEffective = dParams[WuiDispatcherBase.ksParamEffectiveDate]
del dParams[WuiDispatcherBase.ksParamEffectiveDate]
else:
tsEffective = ''
# Forget about page No when changing a period
if WuiDispatcherBase.ksParamPageNo in dParams:
del dParams[WuiDispatcherBase.ksParamPageNo]
sHtmlTimeSelector = '<form name="TimeForm" method="GET">\n'
sHtmlTimeSelector += sPreamble;
sHtmlTimeSelector += '\n <select name="%s" onchange="window.location=' % WuiDispatcherBase.ksParamEffectiveDate
sHtmlTimeSelector += '\'?%s&%s=\' + ' % (webutils.encodeUrlParams(dParams), WuiDispatcherBase.ksParamEffectiveDate)
sHtmlTimeSelector += 'this.options[this.selectedIndex].value;" title="Effective date">\n'
aoWayBackPoints = [
('+0000-00-00 00:00:00.00', 'Now', ' title="Present Day. Present Time."'), # lain :)
('-0000-00-00 01:00:00.00', 'One hour ago', ''),
('-0000-00-00 02:00:00.00', 'Two hours ago', ''),
('-0000-00-00 03:00:00.00', 'Three hours ago', ''),
('-0000-00-01 00:00:00.00', 'One day ago', ''),
('-0000-00-02 00:00:00.00', 'Two days ago', ''),
('-0000-00-03 00:00:00.00', 'Three days ago', ''),
('-0000-00-07 00:00:00.00', 'One week ago', ''),
('-0000-00-14 00:00:00.00', 'Two weeks ago', ''),
('-0000-00-21 00:00:00.00', 'Three weeks ago', ''),
('-0000-01-00 00:00:00.00', 'One month ago', ''),
('-0000-02-00 00:00:00.00', 'Two months ago', ''),
('-0000-03-00 00:00:00.00', 'Three months ago', ''),
('-0000-04-00 00:00:00.00', 'Four months ago', ''),
('-0000-05-00 00:00:00.00', 'Five months ago', ''),
('-0000-06-00 00:00:00.00', 'Half a year ago', ''),
('-0001-00-00 00:00:00.00', 'One year ago', ''),
]
fSelected = False;
for sTimestamp, sWayBackPointCaption, sExtraAttrs in aoWayBackPoints:
if sTimestamp == tsEffective:
fSelected = True;
sHtmlTimeSelector += ' <option value="%s"%s%s>%s</option>\n' \
% (webutils.quoteUrl(sTimestamp),
' selected="selected"' if sTimestamp == tsEffective else '',
sExtraAttrs, sWayBackPointCaption)
if not fSelected and tsEffective != '':
sHtmlTimeSelector += ' <option value="%s" selected>%s</option>\n' \
% (webutils.quoteUrl(tsEffective), tsEffective)
sHtmlTimeSelector += ' </select>\n';
sHtmlTimeSelector += sPostamble;
sHtmlTimeSelector += '\n</form>\n'
return sHtmlTimeSelector
def _generateTimeWalker(self, dParams, tsEffective, sCurPeriod):
"""
Generates HTML code for walking back and forth in time.
"""
# Have to do some math here. :-/
if tsEffective is None:
self._oDb.execute('SELECT CURRENT_TIMESTAMP - \'' + sCurPeriod + '\'::interval');
tsNext = None;
tsPrev = self._oDb.fetchOne()[0];
else:
self._oDb.execute('SELECT %s::TIMESTAMP - \'' + sCurPeriod + '\'::interval,\n'
' %s::TIMESTAMP + \'' + sCurPeriod + '\'::interval',
(tsEffective, tsEffective,));
tsPrev, tsNext = self._oDb.fetchOne();
# Forget about page No when changing a period
if WuiDispatcherBase.ksParamPageNo in dParams:
del dParams[WuiDispatcherBase.ksParamPageNo]
# Format.
dParams[WuiDispatcherBase.ksParamEffectiveDate] = str(tsPrev);
sPrev = '<a href="?%s" title="One period earlier"><<</a> ' \
% (webutils.encodeUrlParams(dParams),);
if tsNext is not None:
dParams[WuiDispatcherBase.ksParamEffectiveDate] = str(tsNext);
sNext = ' <a href="?%s" title="One period later">>></a>' \
% (webutils.encodeUrlParams(dParams),);
else:
sNext = ' >>';
return self._generateTimeSelector(self.getParameters(), sPrev, sNext);
def _generateResultPeriodSelector(self, dParams, sCurPeriod):
"""
Generate HTML code for result period selector.
"""
if self.ksParamEffectivePeriod in dParams:
del dParams[self.ksParamEffectivePeriod];
# Forget about page No when changing a period
if WuiDispatcherBase.ksParamPageNo in dParams:
del dParams[WuiDispatcherBase.ksParamPageNo]
sHtmlPeriodSelector = '<form name="PeriodForm" method="GET">\n'
sHtmlPeriodSelector += ' Period is\n'
sHtmlPeriodSelector += ' <select name="%s" onchange="window.location=' % self.ksParamEffectivePeriod
sHtmlPeriodSelector += '\'?%s&%s=\' + ' % (webutils.encodeUrlParams(dParams), self.ksParamEffectivePeriod)
sHtmlPeriodSelector += 'this.options[this.selectedIndex].value;">\n'
for sPeriodValue, sPeriodCaption, _ in self.kaoResultPeriods:
sHtmlPeriodSelector += ' <option value="%s"%s>%s</option>\n' \
% (webutils.quoteUrl(sPeriodValue),
' selected="selected"' if sPeriodValue == sCurPeriod else '',
sPeriodCaption)
sHtmlPeriodSelector += ' </select>\n' \
'</form>\n'
return sHtmlPeriodSelector
def _generateGroupContentSelector(self, aoGroupMembers, iCurrentMember, sAltAction):
"""
Generate HTML code for group content selector.
"""
dParams = self.getParameters()
if self.ksParamGroupMemberId in dParams:
del dParams[self.ksParamGroupMemberId]
if sAltAction is not None:
if self.ksParamAction in dParams:
del dParams[self.ksParamAction];
dParams[self.ksParamAction] = sAltAction;
sHtmlSelector = '<form name="GroupContentForm" method="GET">\n'
sHtmlSelector += ' <select name="%s" onchange="window.location=' % self.ksParamGroupMemberId
sHtmlSelector += '\'?%s&%s=\' + ' % (webutils.encodeUrlParams(dParams), self.ksParamGroupMemberId)
sHtmlSelector += 'this.options[this.selectedIndex].value;">\n'
sHtmlSelector += '<option value="-1">All</option>\n'
for iGroupMemberId, sGroupMemberName in aoGroupMembers:
if iGroupMemberId is not None:
sHtmlSelector += ' <option value="%s"%s>%s</option>\n' \
% (iGroupMemberId,
' selected="selected"' if iGroupMemberId == iCurrentMember else '',
sGroupMemberName)
sHtmlSelector += ' </select>\n' \
'</form>\n'
return sHtmlSelector
def _generatePagesSelector(self, dParams, cItems, cItemsPerPage, iPage):
"""
Generate HTML code for pages (1, 2, 3 ... N) selector
"""
if WuiDispatcherBase.ksParamPageNo in dParams:
del dParams[WuiDispatcherBase.ksParamPageNo]
sHrefPtr = '<a href="?%s&%s=' % (webutils.encodeUrlParams(dParams).replace('%', '%%'),
WuiDispatcherBase.ksParamPageNo)
sHrefPtr += '%d">%s</a>'
        cNumOfPages = (cItems + cItemsPerPage - 1) // cItemsPerPage;
        cPagesToDisplay = 10
        cPagesRangeStart = iPage - cPagesToDisplay // 2 \
                           if not iPage - cPagesToDisplay // 2 < 0 else 0
        cPagesRangeEnd = cPagesRangeStart + cPagesToDisplay \
                           if not cPagesRangeStart + cPagesToDisplay > cNumOfPages else cNumOfPages
# Adjust pages range
if cNumOfPages < cPagesToDisplay:
cPagesRangeStart = 0
cPagesRangeEnd = cNumOfPages
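        # Illustrative example (numbers are assumptions): with cItems = 95,
        # cItemsPerPage = 10 and iPage = 7, cNumOfPages is 10 and the visible
        # range covers page indices 2..9 (rendered as 3..10), preceded by a
        # '1 ...' link back to the first page.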
# 1 2 3 4...
sHtmlPager = ' \n'.join(sHrefPtr % (x, str(x + 1)) if x != iPage else str(x + 1)
for x in range(cPagesRangeStart, cPagesRangeEnd))
if cPagesRangeStart > 0:
sHtmlPager = '%s ... \n' % (sHrefPtr % (0, str(1))) + sHtmlPager
if cPagesRangeEnd < cNumOfPages:
            sHtmlPager += ' ... %s\n' % (sHrefPtr % (cNumOfPages - 1, str(cNumOfPages)))
# Prev/Next (using << >> because « and » are too tiny).
if iPage > 0:
dParams[WuiDispatcherBase.ksParamPageNo] = iPage - 1
sHtmlPager = ('<a title="Previous page" href="?%s"><<</a> \n'
% (webutils.encodeUrlParams(dParams), )) \
+ sHtmlPager;
else:
sHtmlPager = '<< \n' + sHtmlPager
if iPage + 1 < cNumOfPages:
dParams[WuiDispatcherBase.ksParamPageNo] = iPage + 1
sHtmlPager += '\n <a title="Next page" href="?%s">>></a>\n' % (webutils.encodeUrlParams(dParams),)
else:
sHtmlPager += '\n >>\n'
return sHtmlPager
def _generateItemPerPageSelector(self, dParams, cItemsPerPage):
"""
Generate HTML code for items per page selector
"""
if WuiDispatcherBase.ksParamItemsPerPage in dParams:
del dParams[WuiDispatcherBase.ksParamItemsPerPage]
# Forced reset of the page number
dParams[WuiDispatcherBase.ksParamPageNo] = 0
sHtmlItemsPerPageSelector = '<form name="AgesPerPageForm" method="GET">\n' \
' Max <select name="%s" onchange="window.location=\'?%s&%s=\' + ' \
'this.options[this.selectedIndex].value;" title="Max items per page">\n' \
% (WuiDispatcherBase.ksParamItemsPerPage,
webutils.encodeUrlParams(dParams),
WuiDispatcherBase.ksParamItemsPerPage)
aiItemsPerPage = [16, 32, 64, 128, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096];
for iItemsPerPage in aiItemsPerPage:
sHtmlItemsPerPageSelector += ' <option value="%d" %s>%d</option>\n' \
% (iItemsPerPage,
'selected="selected"' if iItemsPerPage == cItemsPerPage else '',
iItemsPerPage)
sHtmlItemsPerPageSelector += ' </select> items per page\n' \
'</form>\n'
return sHtmlItemsPerPageSelector
def _generateResultNavigation(self, cItems, cItemsPerPage, iPage, tsEffective, sCurPeriod, fOnlyFailures,
sHtmlMemberSelector):
""" Make custom time navigation bar for the results. """
# Generate the elements.
sHtmlStatusSelector = self._generateStatusSelector(self.getParameters(), fOnlyFailures);
sHtmlPeriodSelector = self._generateResultPeriodSelector(self.getParameters(), sCurPeriod)
sHtmlTimeWalker = self._generateTimeWalker(self.getParameters(), tsEffective, sCurPeriod);
if cItems > 0:
sHtmlPager = self._generatePagesSelector(self.getParameters(), cItems, cItemsPerPage, iPage)
sHtmlItemsPerPageSelector = self._generateItemPerPageSelector(self.getParameters(), cItemsPerPage)
else:
sHtmlPager = ''
sHtmlItemsPerPageSelector = ''
# Generate navigation bar
sHtml = '<table width=100%>\n' \
'<tr>\n' \
' <td width=30%>' + sHtmlMemberSelector + '</td>\n' \
' <td width=40% align=center>' + sHtmlTimeWalker + '</td>' \
' <td width=30% align=right>\n' + sHtmlPeriodSelector + '</td>\n' \
'</tr>\n' \
'<tr>\n' \
' <td width=30%>' + sHtmlStatusSelector + '</td>\n' \
' <td width=40% align=center>\n' + sHtmlPager + '</td>\n' \
' <td width=30% align=right>\n' + sHtmlItemsPerPageSelector + '</td>\n'\
'</tr>\n' \
'</table>\n'
return sHtml
def _generateReportNavigation(self, tsEffective, cHoursPerPeriod, cPeriods):
""" Make time navigation bar for the reports. """
# The period length selector.
dParams = self.getParameters();
if WuiMain.ksParamReportPeriodInHours in dParams:
del dParams[WuiMain.ksParamReportPeriodInHours];
sHtmlPeriodLength = '';
sHtmlPeriodLength += '<form name="ReportPeriodInHoursForm" method="GET">\n' \
' Period length <select name="%s" onchange="window.location=\'?%s&%s=\' + ' \
'this.options[this.selectedIndex].value;" title="Statistics period length in hours.">\n' \
% (WuiMain.ksParamReportPeriodInHours,
webutils.encodeUrlParams(dParams),
WuiMain.ksParamReportPeriodInHours)
for cHours in [ 2, 3, 4, 5, 6, 7, 8, 9, 12, 18, 24, 48, 72, 96, 120, 144, 168 ]:
sHtmlPeriodLength += ' <option value="%d"%s>%d hours</option>\n' \
% (cHours, 'selected="selected"' if cHours == cHoursPerPeriod else '', cHours);
sHtmlPeriodLength += ' </select>\n' \
'</form>\n'
# The period count selector.
dParams = self.getParameters();
if WuiMain.ksParamReportPeriods in dParams:
del dParams[WuiMain.ksParamReportPeriods];
sHtmlCountOfPeriods = '';
sHtmlCountOfPeriods += '<form name="ReportPeriodsForm" method="GET">\n' \
' Periods <select name="%s" onchange="window.location=\'?%s&%s=\' + ' \
'this.options[this.selectedIndex].value;" title="Statistics periods to report.">\n' \
% (WuiMain.ksParamReportPeriods,
webutils.encodeUrlParams(dParams),
WuiMain.ksParamReportPeriods)
for cCurPeriods in range(2, 43):
sHtmlCountOfPeriods += ' <option value="%d"%s>%d</option>\n' \
% (cCurPeriods, 'selected="selected"' if cCurPeriods == cPeriods else '', cCurPeriods);
sHtmlCountOfPeriods += ' </select>\n' \
'</form>\n'
# The time walker.
sHtmlTimeWalker = self._generateTimeWalker(self.getParameters(), tsEffective, '%d hours' % (cHoursPerPeriod));
# Combine them all.
sHtml = '<table width=100%>\n' \
' <tr>\n' \
' <td width=30% align="center">\n' + sHtmlPeriodLength + '</td>\n' \
' <td width=40% align="center">\n' + sHtmlTimeWalker + '</td>' \
' <td width=30% align="center">\n' + sHtmlCountOfPeriods + '</td>\n' \
' </tr>\n' \
'</table>\n';
return sHtml;
#
# The rest of stuff
#
def _actionGroupedResultsListing( #pylint: disable=R0914
self,
enmResultsGroupingType,
oResultsLogicType,
oResultsListContentType):
"""
Override generic listing action.
        oResultsLogicType implements getEntriesCount and fetchResultsForListing.
        oResultsListContentType is a child of WuiListContentBase.
"""
cItemsPerPage = self.getIntParam(self.ksParamItemsPerPage, iMin = 2, iMax = 9999, iDefault = 128)
iPage = self.getIntParam(self.ksParamPageNo, iMin = 0, iMax = 999999, iDefault = 0)
tsEffective = self.getEffectiveDateParam()
iGroupMemberId = self.getIntParam(self.ksParamGroupMemberId, iMin = -1, iMax = 999999, iDefault = -1)
fOnlyFailures = self.getBoolParam(self.ksParamOnlyFailures, fDefault = False);
# Get testing results period and validate it
asValidValues = [x for (x, _, _) in self.kaoResultPeriods]
sCurPeriod = self.getStringParam(self.ksParamEffectivePeriod, asValidValues = asValidValues,
sDefault = self.ksResultPeriodDefault)
assert sCurPeriod != ''; # Impossible!
self._checkForUnknownParameters()
#
# Fetch the group members.
#
        # If no grouping is selected, we'll fill the grouping combo with
        # testboxes just to avoid having a completely useless combo box.
#
oTrLogic = TestResultLogic(self._oDb);
sAltSelectorAction = None;
if enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeNone \
or enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeTestBox:
aoTmp = oTrLogic.getTestBoxes(tsNow = tsEffective, sPeriod = sCurPeriod)
aoGroupMembers = sorted(list(set([ (x.idTestBox, '%s (%s)' % (x.sName, str(x.ip))) for x in aoTmp ])),
reverse = False, key = lambda asData: asData[1])
if enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeTestBox:
self._sPageTitle = 'Grouped by Test Box';
else:
self._sPageTitle = 'Ungrouped results';
sAltSelectorAction = self.ksActionResultsGroupedByTestBox;
aoGroupMembers.insert(0, [None, None]); # The "All" member.
elif enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeTestGroup:
aoTmp = oTrLogic.getTestGroups(tsNow = tsEffective, sPeriod = sCurPeriod);
aoGroupMembers = sorted(list(set([ (x.idTestGroup, x.sName ) for x in aoTmp ])),
reverse = False, key = lambda asData: asData[1])
self._sPageTitle = 'Grouped by Test Group'
elif enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeBuildRev:
aoTmp = oTrLogic.getBuilds(tsNow = tsEffective, sPeriod = sCurPeriod)
aoGroupMembers = sorted(list(set([ (x.iRevision, '%s.%d' % (x.oCat.sBranch, x.iRevision)) for x in aoTmp ])),
reverse = True, key = lambda asData: asData[0])
self._sPageTitle = 'Grouped by Build'
elif enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeTestCase:
aoTmp = oTrLogic.getTestCases(tsNow = tsEffective, sPeriod = sCurPeriod)
aoGroupMembers = sorted(list(set([ (x.idTestCase, '%s' % x.sName) for x in aoTmp ])),
reverse = False, key = lambda asData: asData[1])
self._sPageTitle = 'Grouped by Test Case'
elif enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeSchedGroup:
aoTmp = oTrLogic.getSchedGroups(tsNow = tsEffective, sPeriod = sCurPeriod)
aoGroupMembers = sorted(list(set([ (x.idSchedGroup, '%s' % x.sName) for x in aoTmp ])),
reverse = False, key = lambda asData: asData[1])
self._sPageTitle = 'Grouped by Scheduling Group'
else:
raise TMExceptionBase('Unknown grouping type')
_sPageBody = ''
oContent = None
cEntriesMax = 0
_dParams = self.getParameters()
for idMember, sMemberName in aoGroupMembers:
#
# Count and fetch entries to be displayed.
#
# Skip group members that were not specified.
if idMember != iGroupMemberId \
and ( (idMember is not None and enmResultsGroupingType == TestResultLogic.ksResultsGroupingTypeNone)
or (iGroupMemberId > 0 and enmResultsGroupingType != TestResultLogic.ksResultsGroupingTypeNone) ):
continue
oResultLogic = oResultsLogicType(self._oDb);
cEntries = oResultLogic.getEntriesCount(tsNow = tsEffective,
sInterval = sCurPeriod,
enmResultsGroupingType = enmResultsGroupingType,
iResultsGroupingValue = idMember,
fOnlyFailures = fOnlyFailures);
if cEntries == 0: # Do not display empty groups
continue
aoEntries = oResultLogic.fetchResultsForListing(iPage * cItemsPerPage,
cItemsPerPage,
tsNow = tsEffective,
sInterval = sCurPeriod,
enmResultsGroupingType = enmResultsGroupingType,
iResultsGroupingValue = idMember,
fOnlyFailures = fOnlyFailures)
cEntriesMax = max(cEntriesMax, cEntries)
#
# Format them.
#
oContent = oResultsListContentType(aoEntries,
cEntries,
iPage,
cItemsPerPage,
tsEffective,
fnDPrint = self._oSrvGlue.dprint,
oDisp = self)
(_, sHtml) = oContent.show(fShowNavigation = False)
if sMemberName is not None:
_sPageBody += '<table width=100%><tr><td>'
_dParams[self.ksParamGroupMemberId] = idMember
sLink = WuiTmLink(sMemberName, '', _dParams, fBracketed = False).toHtml()
_sPageBody += '<h2>%s (%d)</h2></td>' % (sLink, cEntries)
_sPageBody += '<td><br></td>'
_sPageBody += '</tr></table>'
_sPageBody += sHtml
_sPageBody += '<br>'
#
# Complete the page by slapping navigation controls at the top and
# bottom of it.
#
sHtmlNavigation = self._generateResultNavigation(cEntriesMax, cItemsPerPage, iPage,
tsEffective, sCurPeriod, fOnlyFailures,
self._generateGroupContentSelector(aoGroupMembers, iGroupMemberId,
sAltSelectorAction));
if cEntriesMax > 0:
self._sPageBody = sHtmlNavigation + _sPageBody + sHtmlNavigation;
else:
self._sPageBody = sHtmlNavigation + '<p align="center"><i>No data to display</i></p>\n';
return True;
def _generatePage(self):
"""Override parent handler in order to change page title."""
if self._sPageTitle is not None:
self._sPageTitle = 'Test Results - ' + self._sPageTitle
return WuiDispatcherBase._generatePage(self)
def actionTestResultDetails(self):
"""Show test case execution result details."""
from testmanager.webui.wuitestresult import WuiTestResult;
self._sTemplate = 'template-details.html';
idTestSet = self.getIntParam(TestSetData.ksParam_idTestSet);
self._checkForUnknownParameters()
oTestSetData = TestSetData().initFromDbWithId(self._oDb, idTestSet);
try:
(oTestResultTree, _) = TestResultLogic(self._oDb).fetchResultTree(idTestSet);
except TMTooManyRows:
(oTestResultTree, _) = TestResultLogic(self._oDb).fetchResultTree(idTestSet, 2);
oBuildDataEx = BuildDataEx().initFromDbWithId(self._oDb, oTestSetData.idBuild, oTestSetData.tsCreated);
try: oBuildValidationKitDataEx = BuildDataEx().initFromDbWithId(self._oDb, oTestSetData.idBuildTestSuite,
oTestSetData.tsCreated);
except: oBuildValidationKitDataEx = None;
oTestBoxData = TestBoxData().initFromDbWithGenId(self._oDb, oTestSetData.idGenTestBox);
        oTestGroupData = TestGroupData().initFromDbWithId(self._oDb, ## @todo This is bogus time-wise. Bad DB design?
oTestSetData.idTestGroup, oTestSetData.tsCreated);
oTestCaseDataEx = TestCaseDataEx().initFromDbWithGenId(self._oDb, oTestSetData.idGenTestCase,
oTestSetData.tsConfig);
oTestCaseArgsDataEx = TestCaseArgsDataEx().initFromDbWithGenIdEx(self._oDb, oTestSetData.idGenTestCaseArgs,
oTestSetData.tsConfig);
oContent = WuiTestResult(oDisp = self, fnDPrint = self._oSrvGlue.dprint);
(self._sPageTitle, self._sPageBody) = oContent.showTestCaseResultDetails(oTestResultTree,
oTestSetData,
oBuildDataEx,
oBuildValidationKitDataEx,
oTestBoxData,
oTestGroupData,
oTestCaseDataEx,
oTestCaseArgsDataEx);
return True
def actionViewLog(self):
"""
Log viewer action.
"""
from testmanager.webui.wuilogviewer import WuiLogViewer;
self._sTemplate = 'template-details.html'; ## @todo create new template (background color, etc)
idTestSet = self.getIntParam(self.ksParamLogSetId, iMin = 1);
idLogFile = self.getIntParam(self.ksParamLogFileId, iMin = 0, iDefault = 0);
cbChunk = self.getIntParam(self.ksParamLogChunkSize, iMin = 256, iMax = 16777216, iDefault = 65536);
iChunk = self.getIntParam(self.ksParamLogChunkNo, iMin = 0,
iMax = config.g_kcMbMaxMainLog * 1048576 / cbChunk, iDefault = 0);
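        # Illustrative chunk maths: with the default 64 KiB chunk size a main log
        # capped at N MiB allows at most N * 16 chunks, which is what the iMax
        # limit above works out to.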
self._checkForUnknownParameters();
oTestSet = TestSetData().initFromDbWithId(self._oDb, idTestSet);
if idLogFile == 0:
oTestFile = TestResultFileDataEx().initFakeMainLog(oTestSet);
else:
oTestFile = TestSetLogic(self._oDb).getFile(idTestSet, idLogFile);
if oTestFile.sMime not in [ 'text/plain',]:
raise WuiException('The log view does not display files of type: %s' % (oTestFile.sMime,));
oContent = WuiLogViewer(oTestSet, oTestFile, cbChunk, iChunk, oDisp = self, fnDPrint = self._oSrvGlue.dprint);
(self._sPageTitle, self._sPageBody) = oContent.show();
return True;
def actionGetFile(self):
"""
Get file action.
"""
idTestSet = self.getIntParam(self.ksParamGetFileSetId, iMin = 1);
idFile = self.getIntParam(self.ksParamGetFileId, iMin = 0, iDefault = 0);
fDownloadIt = self.getBoolParam(self.ksParamGetFileDownloadIt, fDefault = True);
self._checkForUnknownParameters();
#
# Get the file info and open it.
#
oTestSet = TestSetData().initFromDbWithId(self._oDb, idTestSet);
if idFile == 0:
oTestFile = TestResultFileDataEx().initFakeMainLog(oTestSet);
else:
oTestFile = TestSetLogic(self._oDb).getFile(idTestSet, idFile);
(oFile, oSizeOrError, _) = oTestSet.openFile(oTestFile.sFile, 'rb');
if oFile is None:
raise Exception(oSizeOrError);
#
# Send the file.
#
self._oSrvGlue.setHeaderField('Content-Type', oTestFile.getMimeWithEncoding());
if fDownloadIt:
self._oSrvGlue.setHeaderField('Content-Disposition', 'attachment; filename="TestSet-%d-%s"'
% (idTestSet, oTestFile.sFile,));
while True:
abChunk = oFile.read(262144);
if len(abChunk) == 0:
break;
self._oSrvGlue.writeRaw(abChunk);
return self.ksDispatchRcAllDone;
def _actionGenericReport(self, oModelType, oReportType):
"""
Generic report action.
oReportType is a child of WuiReportContentBase.
oModelType is a child of ReportModelBase.
"""
tsEffective = self.getEffectiveDateParam();
cPeriods = self.getIntParam(self.ksParamReportPeriods, iMin = 2, iMax = 99, iDefault = 7);
cHoursPerPeriod = self.getIntParam(self.ksParamReportPeriodInHours, iMin = 1, iMax = 168, iDefault = 24);
sSubject = self.getStringParam(self.ksParamReportSubject, ReportModelBase.kasSubjects,
ReportModelBase.ksSubEverything);
if sSubject == ReportModelBase.ksSubEverything:
aidSubjects = self.getListOfIntParams(self.ksParamReportSubjectIds, aiDefaults = []);
else:
aidSubjects = self.getListOfIntParams(self.ksParamReportSubjectIds, iMin = 1);
if aidSubjects is None:
raise WuiException('Missing parameter %s' % (self.ksParamReportSubjectIds,));
self._checkForUnknownParameters();
dParams = \
{
self.ksParamEffectiveDate: tsEffective,
self.ksParamReportPeriods: cPeriods,
self.ksParamReportPeriodInHours: cHoursPerPeriod,
self.ksParamReportSubject: sSubject,
self.ksParamReportSubjectIds: aidSubjects,
};
oModel = oModelType(self._oDb, tsEffective, cPeriods, cHoursPerPeriod, sSubject, aidSubjects);
oContent = oReportType(oModel, dParams, fSubReport = False, fnDPrint = self._oSrvGlue.dprint, oDisp = self);
(self._sPageTitle, self._sPageBody) = oContent.show();
sNavi = self._generateReportNavigation(tsEffective, cHoursPerPeriod, cPeriods);
self._sPageBody = sNavi + self._sPageBody;
return True;
def _actionGraphWiz(self):
"""
Graph wizard action.
"""
from testmanager.webui.wuigraphwiz import WuiGraphWiz;
self._sTemplate = 'template-graphwiz.html';
tsEffective = self.getEffectiveDateParam();
cPeriods = self.getIntParam(self.ksParamReportPeriods, iMin = 1, iMax = 1, iDefault = 1); # Not needed yet.
sTmp = self.getStringParam(self.ksParamReportPeriodInHours, sDefault = '3 weeks');
(cHoursPerPeriod, sError) = utils.parseIntervalHours(sTmp);
if sError is not None: raise WuiException(sError);
asSubjectIds = self.getListOfStrParams(self.ksParamReportSubjectIds);
sSubject = self.getStringParam(self.ksParamReportSubject, [ReportModelBase.ksSubEverything],
ReportModelBase.ksSubEverything); # dummy
aidTestBoxes = self.getListOfIntParams(self.ksParamGraphWizTestBoxIds, iMin = 1, aiDefaults = []);
aidBuildCats = self.getListOfIntParams(self.ksParamGraphWizBuildCatIds, iMin = 1, aiDefaults = []);
aidTestCases = self.getListOfIntParams(self.ksParamGraphWizTestCaseIds, iMin = 1, aiDefaults = []);
fSepTestVars = self.getBoolParam(self.ksParamGraphWizSepTestVars, fDefault = False);
enmGraphImpl = self.getStringParam(self.ksParamGraphWizImpl, asValidValues = self.kasGraphWizImplValid,
sDefault = self.ksGraphWizImpl_Default);
cx = self.getIntParam(self.ksParamGraphWizWidth, iMin = 128, iMax = 8192, iDefault = 1280);
cy = self.getIntParam(self.ksParamGraphWizHeight, iMin = 128, iMax = 8192, iDefault = int(cx * 5 / 16) );
cDotsPerInch = self.getIntParam(self.ksParamGraphWizDpi, iMin = 64, iMax = 512, iDefault = 96);
cPtFont = self.getIntParam(self.ksParamGraphWizFontSize, iMin = 6, iMax = 32, iDefault = 8);
fErrorBarY = self.getBoolParam(self.ksParamGraphWizErrorBarY, fDefault = False);
cMaxErrorBarY = self.getIntParam(self.ksParamGraphWizMaxErrorBarY, iMin = 8, iMax = 9999999, iDefault = 18);
cMaxPerGraph = self.getIntParam(self.ksParamGraphWizMaxPerGraph, iMin = 1, iMax = 24, iDefault = 8);
fXkcdStyle = self.getBoolParam(self.ksParamGraphWizXkcdStyle, fDefault = False);
fTabular = self.getBoolParam(self.ksParamGraphWizTabular, fDefault = False);
idSrcTestSet = self.getIntParam(self.ksParamGraphWizSrcTestSetId, iDefault = None);
self._checkForUnknownParameters();
dParams = \
{
self.ksParamEffectiveDate: tsEffective,
self.ksParamReportPeriods: cPeriods,
self.ksParamReportPeriodInHours: cHoursPerPeriod,
self.ksParamReportSubject: sSubject,
self.ksParamReportSubjectIds: asSubjectIds,
self.ksParamGraphWizTestBoxIds: aidTestBoxes,
self.ksParamGraphWizBuildCatIds: aidBuildCats,
self.ksParamGraphWizTestCaseIds: aidTestCases,
self.ksParamGraphWizSepTestVars: fSepTestVars,
self.ksParamGraphWizImpl: enmGraphImpl,
self.ksParamGraphWizWidth: cx,
self.ksParamGraphWizHeight: cy,
self.ksParamGraphWizDpi: cDotsPerInch,
self.ksParamGraphWizFontSize: cPtFont,
self.ksParamGraphWizErrorBarY: fErrorBarY,
self.ksParamGraphWizMaxErrorBarY: cMaxErrorBarY,
self.ksParamGraphWizMaxPerGraph: cMaxPerGraph,
self.ksParamGraphWizXkcdStyle: fXkcdStyle,
self.ksParamGraphWizTabular: fTabular,
self.ksParamGraphWizSrcTestSetId: idSrcTestSet,
};
oModel = ReportGraphModel(self._oDb, tsEffective, cPeriods, cHoursPerPeriod, sSubject, asSubjectIds,
aidTestBoxes, aidBuildCats, aidTestCases, fSepTestVars);
oContent = WuiGraphWiz(oModel, dParams, fSubReport = False, fnDPrint = self._oSrvGlue.dprint, oDisp = self);
(self._sPageTitle, self._sPageBody) = oContent.show();
return True;
def _actionVcsHistoryTooltip(self):
"""
Version control system history.
"""
self._sTemplate = 'template-tooltip.html';
from testmanager.webui.wuivcshistory import WuiVcsHistoryTooltip;
iRevision = self.getIntParam(self.ksParamVcsHistoryRevision, iMin = 0, iMax = 999999999);
sRepository = self.getStringParam(self.ksParamVcsHistoryRepository);
cEntries = self.getIntParam(self.ksParamVcsHistoryEntries, iMin = 1, iMax = 1024, iDefault = 8);
self._checkForUnknownParameters();
aoEntries = VcsRevisionLogic(self._oDb).fetchTimeline(sRepository, iRevision, cEntries);
oContent = WuiVcsHistoryTooltip(aoEntries, sRepository, iRevision, cEntries,
fnDPrint = self._oSrvGlue.dprint, oDisp = self);
(self._sPageTitle, self._sPageBody) = oContent.show();
return True;
| gpl-2.0 |
ktaneishi/deepchem | deepchem/data/datasets.py | 1 | 49021 | """
Contains wrapper class for datasets.
"""
from __future__ import division
from __future__ import unicode_literals
import json
import os
import math
import deepchem as dc
import numpy as np
import pandas as pd
import random
from deepchem.utils.save import save_to_disk, save_metadata
from deepchem.utils.save import load_from_disk
from deepchem.utils.save import log
from pandas import read_hdf
import tempfile
import time
import shutil
from multiprocessing.dummy import Pool
import warnings
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
def sparsify_features(X):
"""Extracts a sparse feature representation from dense feature array."""
n_samples = len(X)
X_sparse = []
for i in range(n_samples):
nonzero_inds = np.nonzero(X[i])[0]
nonzero_vals = X[i][nonzero_inds]
X_sparse.append((nonzero_inds, nonzero_vals))
X_sparse = np.array(X_sparse, dtype=object)
return X_sparse
def densify_features(X_sparse, num_features):
"""Expands sparse feature representation to dense feature array."""
n_samples = len(X_sparse)
X = np.zeros((n_samples, num_features))
for i in range(n_samples):
nonzero_inds, nonzero_vals = X_sparse[i]
X[i][nonzero_inds.astype(int)] = nonzero_vals
return X
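# Illustrative sketch (not part of the original module; the toy array below is an
# assumption): round-tripping a dense feature matrix through sparsify_features
# and densify_features recovers the original array.
def _example_sparse_roundtrip():
  X = np.array([[0.0, 2.0, 0.0], [1.0, 0.0, 3.0]])
  X_sparse = sparsify_features(X)  # per-sample (nonzero_inds, nonzero_vals) pairs
  X_dense = densify_features(X_sparse, num_features=3)
  assert np.array_equal(X, X_dense)
  return X_dense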
def pad_features(batch_size, X_b):
"""Pads a batch of features to have precisely batch_size elements.
Version of pad_batch for use at prediction time.
"""
num_samples = len(X_b)
if num_samples == batch_size:
return X_b
else:
# By invariant of when this is called, can assume num_samples > 0
# and num_samples < batch_size
if len(X_b.shape) > 1:
feature_shape = X_b.shape[1:]
X_out = np.zeros((batch_size,) + feature_shape, dtype=X_b.dtype)
else:
X_out = np.zeros((batch_size,), dtype=X_b.dtype)
# Fill in batch arrays
start = 0
while start < batch_size:
num_left = batch_size - start
if num_left < num_samples:
increment = num_left
else:
increment = num_samples
X_out[start:start + increment] = X_b[:increment]
start += increment
return X_out
def pad_batch(batch_size, X_b, y_b, w_b, ids_b):
"""Pads batch to have size precisely batch_size elements.
Fills in batch by wrapping around samples till whole batch is filled.
"""
num_samples = len(X_b)
if num_samples == batch_size:
return (X_b, y_b, w_b, ids_b)
# By invariant of when this is called, can assume num_samples > 0
# and num_samples < batch_size
if len(X_b.shape) > 1:
feature_shape = X_b.shape[1:]
X_out = np.zeros((batch_size,) + feature_shape, dtype=X_b.dtype)
else:
X_out = np.zeros((batch_size,), dtype=X_b.dtype)
if y_b is None:
y_out = None
elif len(y_b.shape) < 2:
y_out = np.zeros(batch_size, dtype=y_b.dtype)
else:
y_out = np.zeros((batch_size,) + y_b.shape[1:], dtype=y_b.dtype)
if w_b is None:
w_out = None
elif len(w_b.shape) < 2:
w_out = np.zeros(batch_size, dtype=w_b.dtype)
else:
w_out = np.zeros((batch_size,) + w_b.shape[1:], dtype=w_b.dtype)
ids_out = np.zeros((batch_size,), dtype=ids_b.dtype)
# Fill in batch arrays
start = 0
# Only the first set of copy will be counted in training loss
if w_out is not None:
w_out[start:start + num_samples] = w_b[:]
while start < batch_size:
num_left = batch_size - start
if num_left < num_samples:
increment = num_left
else:
increment = num_samples
X_out[start:start + increment] = X_b[:increment]
if y_out is not None:
y_out[start:start + increment] = y_b[:increment]
ids_out[start:start + increment] = ids_b[:increment]
start += increment
return (X_out, y_out, w_out, ids_out)
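# Illustrative sketch (not part of the original module; toy shapes are
# assumptions): pad_batch wraps the available samples to fill the batch exactly,
# while leaving the weights of the wrapped copies at zero so only the first pass
# over the data counts towards the training loss.
def _example_pad_batch():
  X_b = np.arange(6.0).reshape(3, 2)
  y_b = np.ones((3, 1))
  w_b = np.ones((3, 1))
  ids_b = np.array(['a', 'b', 'c'], dtype=object)
  X_out, y_out, w_out, ids_out = pad_batch(5, X_b, y_b, w_b, ids_b)
  assert X_out.shape == (5, 2) and w_out[3:].sum() == 0
  return X_out, y_out, w_out, ids_out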
class Dataset(object):
"""Abstract base class for datasets defined by X, y, w elements."""
def __init__(self):
raise NotImplementedError()
def __len__(self):
"""
Get the number of elements in the dataset.
"""
raise NotImplementedError()
def get_shape(self):
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
raise NotImplementedError()
def get_task_names(self):
"""Get the names of the tasks associated with this dataset."""
raise NotImplementedError()
@property
def X(self):
"""Get the X vector for this dataset as a single numpy array."""
raise NotImplementedError()
@property
def y(self):
"""Get the y vector for this dataset as a single numpy array."""
raise NotImplementedError()
@property
def ids(self):
"""Get the ids vector for this dataset as a single numpy array."""
raise NotImplementedError()
@property
def w(self):
"""Get the weight vector for this dataset as a single numpy array."""
raise NotImplementedError()
def iterbatches(self,
batch_size=None,
epoch=0,
deterministic=False,
pad_batches=False):
"""
Parameters
----------
Returns
-------
"""
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y, w, ids).
"""
raise NotImplementedError()
def itersamples(self):
"""Get an object that iterates over the samples in the dataset.
Example:
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
raise NotImplementedError()
def transform(self, fn, **args):
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple times with
different subsets of the data. Each time it is called, it should transform
the samples and return the transformed data.
Parameters
----------
fn: function
A function to apply to each sample in the dataset
Returns
-------
a newly constructed Dataset object
"""
raise NotImplementedError()
def get_statistics(self, X_stats=True, y_stats=True):
"""Compute and return statistics of this dataset."""
X_means = 0.0
X_m2 = 0.0
y_means = 0.0
y_m2 = 0.0
n = 0
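    # Single pass over the samples with Welford-style running updates: *_means
    # tracks the running mean and *_m2 the running sum of squared deviations,
    # so the standard deviation below is sqrt(m2 / n).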
for X, y, _, _ in self.itersamples():
n += 1
if X_stats:
dx = X - X_means
X_means += dx / n
X_m2 += dx * (X - X_means)
if y_stats:
dy = y - y_means
y_means += dy / n
y_m2 += dy * (y - y_means)
if n < 2:
X_stds = 0.0
y_stds = 0
else:
X_stds = np.sqrt(X_m2 / n)
y_stds = np.sqrt(y_m2 / n)
if X_stats and not y_stats:
return X_means, X_stds
elif y_stats and not X_stats:
return y_means, y_stds
elif X_stats and y_stats:
return X_means, X_stds, y_means, y_stds
else:
return None
def make_iterator(self,
batch_size=100,
epochs=1,
deterministic=False,
pad_batches=False):
"""Create a tf.data.Iterator that iterates over the data in this Dataset.
The iterator's get_next() method returns a tuple of three tensors (X, y, w)
which can be used to retrieve the features, labels, and weights respectively.
Parameters
----------
batch_size: int
the number of samples to include in each batch
epochs: int
the number of times to iterate over the Dataset
deterministic: bool
if True, the data is produced in order. If False, a different random
permutation of the data is used for each epoch.
pad_batches: bool
if True, batches are padded as necessary to make the size of each batch
exactly equal batch_size.
"""
# Retrieve the first sample so we can determine the dtypes.
import tensorflow as tf
X, y, w, ids = next(self.itersamples())
dtypes = (tf.as_dtype(X.dtype), tf.as_dtype(y.dtype), tf.as_dtype(w.dtype))
shapes = (tf.TensorShape([None] + list(X.shape)),
tf.TensorShape([None] + list(y.shape)),
tf.TensorShape([None] + list(w.shape)))
# Create a Tensorflow Dataset and have it create an Iterator.
def gen_data():
for epoch in range(epochs):
for X, y, w, ids in self.iterbatches(batch_size, epoch, deterministic,
pad_batches):
yield (X, y, w)
dataset = tf.data.Dataset.from_generator(gen_data, dtypes, shapes)
return dataset.make_one_shot_iterator()
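# Illustrative sketch (an assumption, not deepchem API): consuming the iterator
# returned by make_iterator() with the TF 1.x-style session API implied by
# make_one_shot_iterator() above.
def _example_make_iterator(dataset, batch_size=32):
  import tensorflow as tf
  iterator = dataset.make_iterator(batch_size=batch_size, epochs=1)
  X_t, y_t, w_t = iterator.get_next()  # symbolic tensors for one batch
  with tf.Session() as sess:
    return sess.run([X_t, y_t, w_t])  # concrete arrays for the first batch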
class NumpyDataset(Dataset):
"""A Dataset defined by in-memory numpy arrays."""
def __init__(self, X, y=None, w=None, ids=None, n_tasks=1):
n_samples = len(X)
if n_samples > 0:
if y is None:
# Set labels to be zero, with zero weights
y = np.zeros((n_samples, n_tasks))
w = np.zeros_like(y)
if ids is None:
ids = np.arange(n_samples)
if w is None:
w = np.ones_like(y)
if not isinstance(X, np.ndarray):
X = np.array(X)
if not isinstance(y, np.ndarray):
y = np.array(y)
if not isinstance(w, np.ndarray):
w = np.array(w)
self._X = X
self._y = y
self._w = w
self._ids = np.array(ids, dtype=object)
def __len__(self):
"""
Get the number of elements in the dataset.
"""
return len(self._y)
def get_shape(self):
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X.shape, self._y.shape, self._w.shape, self._ids.shape
def get_task_names(self):
"""Get the names of the tasks associated with this dataset."""
if len(self._y.shape) < 2:
return np.array([0])
return np.arange(self._y.shape[1])
@property
def X(self):
"""Get the X vector for this dataset as a single numpy array."""
return self._X
@property
def y(self):
"""Get the y vector for this dataset as a single numpy array."""
return self._y
@property
def ids(self):
"""Get the ids vector for this dataset as a single numpy array."""
return self._ids
@property
def w(self):
"""Get the weight vector for this dataset as a single numpy array."""
return self._w
def iterbatches(self,
batch_size=None,
epoch=0,
deterministic=False,
pad_batches=False):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y, w, ids).
"""
def iterate(dataset, batch_size, deterministic, pad_batches):
n_samples = dataset._X.shape[0]
if not deterministic:
sample_perm = np.random.permutation(n_samples)
else:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_batch = dataset._X[perm_indices]
y_batch = dataset._y[perm_indices]
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, deterministic, pad_batches)
def itersamples(self):
"""Get an object that iterates over the samples in the dataset.
Example:
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
n_samples = self._X.shape[0]
return ((self._X[i], self._y[i], self._w[i], self._ids[i])
for i in range(n_samples))
def transform(self, fn, **args):
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple times with
different subsets of the data. Each time it is called, it should transform
the samples and return the transformed data.
Parameters
----------
fn: function
A function to apply to each sample in the dataset
Returns
-------
a newly constructed Dataset object
"""
newx, newy, neww = fn(self._X, self._y, self._w)
return NumpyDataset(newx, newy, neww, self._ids[:])
def select(self, indices, select_dir=None):
"""Creates a new dataset from a selection of indices from self.
TODO(rbharath): select_dir is here due to dc.splits always passing in
splits.
Parameters
----------
indices: list
List of indices to select.
select_dir: string
Ignored.
"""
X = self.X[indices]
y = self.y[indices]
w = self.w[indices]
ids = self.ids[indices]
return NumpyDataset(X, y, w, ids)
@staticmethod
def from_DiskDataset(ds):
"""
Parameters
----------
ds : DiskDataset
DiskDataset to transorm to NumpyDataset
Returns
-------
NumpyDataset
Data of ds as NumpyDataset
"""
return NumpyDataset(ds.X, ds.y, ds.w, ds.ids)
@staticmethod
def to_json(self, fname):
d = {
'X': self.X.tolist(),
'y': self.y.tolist(),
'w': self.w.tolist(),
'ids': self.ids.tolist()
}
with open(fname, 'w') as fout:
json.dump(d, fout)
@staticmethod
def from_json(fname):
with open(fname) as fin:
d = json.load(fin)
return NumpyDataset(d['X'], d['y'], d['w'], d['ids'])
@staticmethod
def merge(datasets):
"""
Parameters
----------
datasets: list of deepchem.data.NumpyDataset
list of datasets to merge
Returns
-------
Single deepchem.data.NumpyDataset with data concatenated over axis 0
"""
X, y, w, ids = datasets[0].X, datasets[0].y, datasets[0].w, datasets[0].ids
for dataset in datasets[1:]:
X = np.concatenate([X, dataset.X], axis=0)
y = np.concatenate([y, dataset.y], axis=0)
w = np.concatenate([w, dataset.w], axis=0)
ids = np.concatenate(
[ids, dataset.ids],
axis=0,
)
return NumpyDataset(X, y, w, ids, n_tasks=y.shape[1])
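# Illustrative usage sketch (not part of the original module; toy arrays are
# assumptions): wrap in-memory arrays in a NumpyDataset and iterate over padded
# minibatches.
def _example_numpy_dataset():
  X = np.random.rand(10, 4)
  y = np.random.rand(10, 1)
  dataset = NumpyDataset(X, y)
  for X_b, y_b, w_b, ids_b in dataset.iterbatches(batch_size=4, pad_batches=True):
    assert X_b.shape == (4, 4)
  return dataset.get_shape()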
class DiskDataset(Dataset):
"""
A Dataset that is stored as a set of files on disk.
"""
def __init__(self, data_dir, verbose=True):
"""
Turns featurized dataframes into numpy files, writes them & metadata to disk.
"""
self.data_dir = data_dir
self.verbose = verbose
log("Loading dataset from disk.", self.verbose)
self.tasks, self.metadata_df = self.load_metadata()
@staticmethod
def create_dataset(shard_generator, data_dir=None, tasks=[], verbose=True):
"""Creates a new DiskDataset
Parameters
----------
shard_generator: Iterable
An iterable (either a list or generator) that provides tuples of data
(X, y, w, ids). Each tuple will be written to a separate shard on disk.
data_dir: str
Filename for data directory. Creates a temp directory if none specified.
tasks: list
List of tasks for this dataset.
"""
if data_dir is None:
data_dir = tempfile.mkdtemp()
elif not os.path.exists(data_dir):
os.makedirs(data_dir)
metadata_rows = []
time1 = time.time()
for shard_num, (X, y, w, ids) in enumerate(shard_generator):
basename = "shard-%d" % shard_num
metadata_rows.append(
DiskDataset.write_data_to_disk(data_dir, basename, tasks, X, y, w,
ids))
metadata_df = DiskDataset._construct_metadata(metadata_rows)
save_metadata(tasks, metadata_df, data_dir)
time2 = time.time()
log("TIMING: dataset construction took %0.3f s" % (time2 - time1), verbose)
return DiskDataset(data_dir, verbose=verbose)
def load_metadata(self):
try:
tasks_filename, metadata_filename = self._get_metadata_filename()
with open(tasks_filename) as fin:
tasks = json.load(fin)
metadata_df = pd.read_csv(metadata_filename, compression='gzip')
metadata_df = metadata_df.where((pd.notnull(metadata_df)), None)
return tasks, metadata_df
except Exception as e:
pass
# Load obsolete format -> save in new format
metadata_filename = os.path.join(self.data_dir, "metadata.joblib")
if os.path.exists(metadata_filename):
tasks, metadata_df = load_from_disk(metadata_filename)
del metadata_df['task_names']
del metadata_df['basename']
save_metadata(tasks, metadata_df, self.data_dir)
return tasks, metadata_df
raise ValueError("No Metadata Found On Disk")
@staticmethod
def _construct_metadata(metadata_entries):
"""Construct a dataframe containing metadata.
metadata_entries should have elements returned by write_data_to_disk
above.
"""
columns = ('ids', 'X', 'y', 'w')
metadata_df = pd.DataFrame(metadata_entries, columns=columns)
return metadata_df
@staticmethod
def write_data_to_disk(data_dir,
basename,
tasks,
X=None,
y=None,
w=None,
ids=None):
if X is not None:
out_X = "%s-X.joblib" % basename
save_to_disk(X, os.path.join(data_dir, out_X))
else:
out_X = None
if y is not None:
out_y = "%s-y.joblib" % basename
save_to_disk(y, os.path.join(data_dir, out_y))
else:
out_y = None
if w is not None:
out_w = "%s-w.joblib" % basename
save_to_disk(w, os.path.join(data_dir, out_w))
else:
out_w = None
if ids is not None:
out_ids = "%s-ids.joblib" % basename
save_to_disk(ids, os.path.join(data_dir, out_ids))
else:
out_ids = None
# note that this corresponds to the _construct_metadata column order
return [out_ids, out_X, out_y, out_w]
def save_to_disk(self):
"""Save dataset to disk."""
save_metadata(self.tasks, self.metadata_df, self.data_dir)
def move(self, new_data_dir):
"""Moves dataset to new directory."""
shutil.move(self.data_dir, new_data_dir)
self.data_dir = new_data_dir
def get_task_names(self):
"""
Gets learning tasks associated with this dataset.
"""
return self.tasks
# if not len(self.metadata_df):
# raise ValueError("No data in dataset.")
# return next(self.metadata_df.iterrows())[1]['task_names']
def reshard(self, shard_size):
"""Reshards data to have specified shard size."""
# Create temp directory to store resharded version
reshard_dir = tempfile.mkdtemp()
new_metadata = []
# Write data in new shards
def generator():
tasks = self.get_task_names()
X_next = np.zeros((0,) + self.get_data_shape())
y_next = np.zeros((0,) + (len(tasks),))
w_next = np.zeros((0,) + (len(tasks),))
ids_next = np.zeros((0,), dtype=object)
for (X, y, w, ids) in self.itershards():
X_next = np.concatenate([X_next, X], axis=0)
y_next = np.concatenate([y_next, y], axis=0)
w_next = np.concatenate([w_next, w], axis=0)
ids_next = np.concatenate([ids_next, ids])
while len(X_next) > shard_size:
X_batch, X_next = X_next[:shard_size], X_next[shard_size:]
y_batch, y_next = y_next[:shard_size], y_next[shard_size:]
w_batch, w_next = w_next[:shard_size], w_next[shard_size:]
ids_batch, ids_next = ids_next[:shard_size], ids_next[shard_size:]
yield (X_batch, y_batch, w_batch, ids_batch)
# Handle spillover from last shard
yield (X_next, y_next, w_next, ids_next)
resharded_dataset = DiskDataset.create_dataset(
generator(), data_dir=reshard_dir, tasks=self.tasks)
shutil.rmtree(self.data_dir)
shutil.move(reshard_dir, self.data_dir)
self.metadata_df = resharded_dataset.metadata_df
self.save_to_disk()
def get_data_shape(self):
"""
Gets array shape of datapoints in this dataset.
"""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
sample_X = load_from_disk(
os.path.join(self.data_dir,
next(self.metadata_df.iterrows())[1]['X']))
return np.shape(sample_X)[1:]
def get_shard_size(self):
"""Gets size of shards on disk."""
if not len(self.metadata_df):
raise ValueError("No data in dataset.")
sample_y = load_from_disk(
os.path.join(self.data_dir,
next(self.metadata_df.iterrows())[1]['y']))
return len(sample_y)
def _get_metadata_filename(self):
"""
Get standard location for metadata file.
"""
metadata_filename = os.path.join(self.data_dir, "metadata.csv.gzip")
tasks_filename = os.path.join(self.data_dir, "tasks.json")
return tasks_filename, metadata_filename
def get_number_shards(self):
"""
Returns the number of shards for this dataset.
"""
return self.metadata_df.shape[0]
def itershards(self):
"""
Return an object that iterates over all shards in dataset.
Datasets are stored in sharded fashion on disk. Each call to next() for the
generator defined by this function returns the data from a particular shard.
The order of shards returned is guaranteed to remain fixed.
"""
def iterate(dataset):
for _, row in dataset.metadata_df.iterrows():
X = np.array(load_from_disk(os.path.join(dataset.data_dir, row['X'])))
ids = np.array(
load_from_disk(os.path.join(dataset.data_dir, row['ids'])),
dtype=object)
        # These columns may be missing if the dataset is unlabelled.
if row['y'] is not None:
y = np.array(load_from_disk(os.path.join(dataset.data_dir, row['y'])))
else:
y = None
if row['w'] is not None:
w_filename = os.path.join(dataset.data_dir, row['w'])
if os.path.exists(w_filename):
w = np.array(load_from_disk(w_filename))
else:
w = np.ones(y.shape)
else:
w = None
yield (X, y, w, ids)
return iterate(self)
def iterbatches(self,
batch_size=None,
epoch=0,
deterministic=False,
pad_batches=False):
""" Get an object that iterates over minibatches from the dataset. It is guaranteed
that the number of batches returned is math.ceil(len(dataset)/batch_size).
Each minibatch is returned as a tuple of four numpy arrays: (X, y, w, ids).
    Parameters
    ----------
batch_size: int
Number of elements in a batch. If None, then it yields batches with size equal to the size
of each individual shard.
epoch: int
Not used
deterministic: bool
      If False, the elements within each shard are shuffled before batches are generated.
Note that this is only local in the sense that it does not ever mix between different
shards.
pad_batches: bool
Whether or not we should pad the last batch, globally, such that it has exactly batch_size
elements.
"""
def iterate(dataset, batch_size):
num_shards = dataset.get_number_shards()
if not deterministic:
shard_perm = np.random.permutation(num_shards)
else:
shard_perm = np.arange(num_shards)
# (ytz): Depending on the application, thread-based pools may be faster
# than process based pools, since process based pools need to pickle/serialize
# objects as an extra overhead. Also, as hideously as un-thread safe this looks,
# we're actually protected by the GIL.
pool = Pool(1) # mp.dummy aliases ThreadPool to Pool
next_shard = pool.apply_async(dataset.get_shard, (shard_perm[0],))
total_yield = 0
if batch_size is None:
num_global_batches = num_shards
else:
num_global_batches = math.ceil(dataset.get_shape()[0][0] / batch_size)
cur_global_batch = 0
cur_shard = 0
carry = None
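      # 'carry' holds any partial batch left over at the end of a shard; it is
      # prepended to the next shard's data so that batches can span shard
      # boundaries and every yielded batch (except possibly the last) has
      # batch_size rows.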
while cur_global_batch < num_global_batches:
X, y, w, ids = next_shard.get()
if cur_shard < num_shards - 1:
next_shard = pool.apply_async(dataset.get_shard,
(shard_perm[cur_shard + 1],))
else:
pool.close()
if carry is not None:
X = np.concatenate([carry[0], X], axis=0)
if y is not None:
y = np.concatenate([carry[1], y], axis=0)
if w is not None:
w = np.concatenate([carry[2], w], axis=0)
ids = np.concatenate([carry[3], ids], axis=0)
carry = None
n_shard_samples = X.shape[0]
cur_local_batch = 0
if batch_size is None:
shard_batch_size = n_shard_samples
else:
shard_batch_size = batch_size
if n_shard_samples == 0:
cur_shard += 1
if batch_size is None:
cur_global_batch += 1
continue
num_local_batches = math.ceil(n_shard_samples / shard_batch_size)
if not deterministic:
sample_perm = np.random.permutation(n_shard_samples)
else:
sample_perm = np.arange(n_shard_samples)
while cur_local_batch < num_local_batches:
start = cur_local_batch * shard_batch_size
end = min(n_shard_samples, (cur_local_batch + 1) * shard_batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
X_b = X[perm_indices]
if y is not None:
y_b = y[perm_indices]
else:
y_b = None
if w is not None:
w_b = w[perm_indices]
else:
w_b = None
ids_b = ids[perm_indices]
assert len(X_b) <= shard_batch_size
if len(X_b) < shard_batch_size and cur_shard != num_shards - 1:
assert carry is None
carry = [X_b, y_b, w_b, ids_b]
else:
# (ytz): this skips everything except possibly the last shard
if pad_batches:
(X_b, y_b, w_b, ids_b) = pad_batch(shard_batch_size, X_b, y_b,
w_b, ids_b)
yield X_b, y_b, w_b, ids_b
cur_global_batch += 1
cur_local_batch += 1
cur_shard += 1
return iterate(self, batch_size)
def itersamples(self):
"""Get an object that iterates over the samples in the dataset.
Example:
>>> dataset = DiskDataset.from_numpy(np.ones((2,2)), np.ones((2,1)), verbose=False)
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [1.0] [1.0] 0
[1.0, 1.0] [1.0] [1.0] 1
"""
def iterate(dataset):
for (X_shard, y_shard, w_shard, ids_shard) in dataset.itershards():
n_samples = X_shard.shape[0]
for i in range(n_samples):
def sanitize(elem):
if elem is None:
return None
else:
return elem[i]
yield map(sanitize, [X_shard, y_shard, w_shard, ids_shard])
return iterate(self)
def transform(self, fn, **args):
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple times with different
subsets of the data. Each time it is called, it should transform the samples and return
the transformed data.
Parameters
----------
fn: function
A function to apply to each sample in the dataset
out_dir: string
The directory to save the new dataset in. If this is omitted, a temporary directory
is created automatically
Returns
-------
a newly constructed Dataset object
"""
if 'out_dir' in args:
out_dir = args['out_dir']
else:
out_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
if 'verbose' in args:
verbose = args['verbose']
else:
verbose = True
def generator():
for shard_num, row in self.metadata_df.iterrows():
X, y, w, ids = self.get_shard(shard_num)
newx, newy, neww = fn(X, y, w)
yield (newx, newy, neww, ids)
return DiskDataset.create_dataset(
generator(), data_dir=out_dir, tasks=tasks, verbose=verbose)
@staticmethod
def from_numpy(X,
y=None,
w=None,
ids=None,
tasks=None,
data_dir=None,
verbose=True):
"""Creates a DiskDataset object from specified Numpy arrays."""
n_samples = len(X)
if ids is None:
ids = np.arange(n_samples)
if y is not None:
if w is None:
w = np.ones_like(y)
if tasks is None:
if len(y.shape) > 1:
n_tasks = y.shape[1]
else:
n_tasks = 1
tasks = np.arange(n_tasks)
else:
if w is not None:
warnings.warn('y is None but w is not None. Setting w to None',
UserWarning)
w = None
if tasks is not None:
warnings.warn('y is None but tasks is not None. Setting tasks to None',
UserWarning)
tasks = None
# raw_data = (X, y, w, ids)
return DiskDataset.create_dataset(
[(X, y, w, ids)], data_dir=data_dir, tasks=tasks, verbose=verbose)
@staticmethod
def merge(datasets, merge_dir=None):
"""Merges provided datasets into a merged dataset."""
if merge_dir is not None:
if not os.path.exists(merge_dir):
os.makedirs(merge_dir)
else:
merge_dir = tempfile.mkdtemp()
# Protect against generator exhaustion
datasets = list(datasets)
# This ensures tasks are consistent for all datasets
tasks = []
for dataset in datasets:
try:
tasks.append(dataset.tasks)
except AttributeError:
pass
if tasks:
if len(tasks) < len(datasets) or len(set(map(tuple, tasks))) > 1:
raise ValueError(
'Cannot merge datasets with different task specifications')
tasks = tasks[0]
def generator():
for ind, dataset in enumerate(datasets):
X, y, w, ids = (dataset.X, dataset.y, dataset.w, dataset.ids)
yield (X, y, w, ids)
return DiskDataset.create_dataset(
generator(), data_dir=merge_dir, tasks=tasks)
def subset(self, shard_nums, subset_dir=None):
"""Creates a subset of the original dataset on disk."""
if subset_dir is not None:
if not os.path.exists(subset_dir):
os.makedirs(subset_dir)
else:
subset_dir = tempfile.mkdtemp()
tasks = self.get_task_names()
def generator():
for shard_num, row in self.metadata_df.iterrows():
if shard_num not in shard_nums:
continue
X, y, w, ids = self.get_shard(shard_num)
yield (X, y, w, ids)
return DiskDataset.create_dataset(
generator(), data_dir=subset_dir, tasks=tasks)
def sparse_shuffle(self):
"""Shuffling that exploits data sparsity to shuffle large datasets.
Only for 1-dimensional feature vectors (does not work for tensorial
featurizations).
"""
time1 = time.time()
shard_size = self.get_shard_size()
num_shards = self.get_number_shards()
X_sparses, ys, ws, ids = [], [], [], []
num_features = None
for i in range(num_shards):
(X_s, y_s, w_s, ids_s) = self.get_shard(i)
if num_features is None:
num_features = X_s.shape[1]
X_sparse = sparsify_features(X_s)
X_sparses, ys, ws, ids = (X_sparses + [X_sparse], ys + [y_s], ws + [w_s],
ids + [np.atleast_1d(np.squeeze(ids_s))])
# Get full dataset in memory
(X_sparse, y, w, ids) = (np.vstack(X_sparses), np.vstack(ys), np.vstack(ws),
np.concatenate(ids))
# Shuffle in memory
num_samples = len(X_sparse)
permutation = np.random.permutation(num_samples)
X_sparse, y, w, ids = (X_sparse[permutation], y[permutation],
w[permutation], ids[permutation])
# Write shuffled shards out to disk
for i in range(num_shards):
start, stop = i * shard_size, (i + 1) * shard_size
(X_sparse_s, y_s, w_s, ids_s) = (X_sparse[start:stop], y[start:stop],
w[start:stop], ids[start:stop])
X_s = densify_features(X_sparse_s, num_features)
self.set_shard(i, X_s, y_s, w_s, ids_s)
time2 = time.time()
log("TIMING: sparse_shuffle took %0.3f s" % (time2 - time1), self.verbose)
def complete_shuffle(self, data_dir=None):
"""
Completely shuffle across all data, across all shards.
Note: this loads all the data into ram, and can be prohibitively
expensive for larger datasets.
Parameters
----------
    data_dir: string
      Directory to write the shuffled dataset to. If None, a temporary
      directory is used.
Returns
-------
    DiskDataset
A DiskDataset with a single shard.
"""
all_X = []
all_y = []
all_w = []
all_ids = []
for Xs, ys, ws, ids in self.itershards():
all_X.append(Xs)
if ys is not None:
all_y.append(ys)
if ws is not None:
all_w.append(ws)
all_ids.append(ids)
all_X = np.concatenate(all_X)
all_y = np.concatenate(all_y)
all_w = np.concatenate(all_w)
all_ids = np.concatenate(all_ids)
perm = np.random.permutation(all_X.shape[0])
all_X = all_X[perm]
all_y = all_y[perm]
all_w = all_w[perm]
all_ids = all_ids[perm]
return DiskDataset.from_numpy(
all_X, all_y, all_w, all_ids, data_dir=data_dir)
def shuffle_each_shard(self):
"""Shuffles elements within each shard of the datset."""
tasks = self.get_task_names()
# Shuffle the arrays corresponding to each row in metadata_df
n_rows = len(self.metadata_df.index)
for i in range(n_rows):
row = self.metadata_df.iloc[i]
X, y, w, ids = self.get_shard(i)
n = X.shape[0]
permutation = np.random.permutation(n)
X, y, w, ids = (X[permutation], y[permutation], w[permutation],
ids[permutation])
      DiskDataset.write_data_to_disk(self.data_dir, "shard-%d" % i, tasks, X, y, w, ids)
def shuffle_shards(self):
"""Shuffles the order of the shards for this dataset."""
metadata_rows = self.metadata_df.values.tolist()
random.shuffle(metadata_rows)
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def get_shard(self, i):
"""Retrieves data for the i-th shard from disk."""
row = self.metadata_df.iloc[i]
X = np.array(load_from_disk(os.path.join(self.data_dir, row['X'])))
if row['y'] is not None:
y = np.array(load_from_disk(os.path.join(self.data_dir, row['y'])))
else:
y = None
if row['w'] is not None:
# TODO (ytz): Under what condition does this exist but the file itself doesn't?
w_filename = os.path.join(self.data_dir, row['w'])
if os.path.exists(w_filename):
w = np.array(load_from_disk(w_filename))
else:
w = np.ones(y.shape)
else:
w = None
ids = np.array(
load_from_disk(os.path.join(self.data_dir, row['ids'])), dtype=object)
return (X, y, w, ids)
def add_shard(self, X, y, w, ids):
"""Adds a data shard."""
metadata_rows = self.metadata_df.values.tolist()
shard_num = len(metadata_rows)
basename = "shard-%d" % shard_num
tasks = self.get_task_names()
metadata_rows.append(
DiskDataset.write_data_to_disk(self.data_dir, basename, tasks, X, y, w,
ids))
self.metadata_df = DiskDataset._construct_metadata(metadata_rows)
self.save_to_disk()
def set_shard(self, shard_num, X, y, w, ids):
"""Writes data shard to disk"""
basename = "shard-%d" % shard_num
tasks = self.get_task_names()
DiskDataset.write_data_to_disk(self.data_dir, basename, tasks, X, y, w, ids)
def select(self, indices, select_dir=None):
"""Creates a new dataset from a selection of indices from self.
Parameters
----------
select_dir: string
Path to new directory that the selected indices will be copied to.
indices: list
List of indices to select.
"""
if select_dir is not None:
if not os.path.exists(select_dir):
os.makedirs(select_dir)
else:
select_dir = tempfile.mkdtemp()
# Handle edge case with empty indices
if not len(indices):
return DiskDataset.create_dataset(
[], data_dir=select_dir, verbose=self.verbose)
indices = np.array(sorted(indices)).astype(int)
tasks = self.get_task_names()
def generator():
count, indices_count = 0, 0
for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
shard_len = len(X)
# Find indices which rest in this shard
num_shard_elts = 0
while indices[indices_count + num_shard_elts] < count + shard_len:
num_shard_elts += 1
if indices_count + num_shard_elts >= len(indices):
break
# Need to offset indices to fit within shard_size
shard_inds = indices[indices_count:indices_count +
num_shard_elts] - count
X_sel = X[shard_inds]
# Handle the case of datasets with y/w missing
if y is not None:
y_sel = y[shard_inds]
else:
y_sel = None
if w is not None:
w_sel = w[shard_inds]
else:
w_sel = None
ids_sel = ids[shard_inds]
yield (X_sel, y_sel, w_sel, ids_sel)
# Updating counts
indices_count += num_shard_elts
count += shard_len
# Break when all indices have been used up already
if indices_count >= len(indices):
return
return DiskDataset.create_dataset(
generator(), data_dir=select_dir, tasks=tasks, verbose=self.verbose)
@property
def ids(self):
"""Get the ids vector for this dataset as a single numpy array."""
if len(self) == 0:
return np.array([])
ids = []
for (_, _, _, ids_b) in self.itershards():
ids.append(np.atleast_1d(np.squeeze(ids_b)))
return np.concatenate(ids)
@property
def X(self):
"""Get the X vector for this dataset as a single numpy array."""
Xs = []
one_dimensional = False
for (X_b, _, _, _) in self.itershards():
Xs.append(X_b)
if len(X_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(Xs)
else:
return np.concatenate(Xs)
@property
def y(self):
"""Get the y vector for this dataset as a single numpy array."""
ys = []
one_dimensional = False
for (_, y_b, _, _) in self.itershards():
ys.append(y_b)
if len(y_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ys)
else:
return np.concatenate(ys)
@property
def w(self):
"""Get the weight vector for this dataset as a single numpy array."""
ws = []
one_dimensional = False
for (_, _, w_b, _) in self.itershards():
ws.append(np.array(w_b))
if len(w_b.shape) == 1:
one_dimensional = True
if not one_dimensional:
return np.vstack(ws)
else:
return np.concatenate(ws)
def __len__(self):
"""
Finds number of elements in dataset.
"""
total = 0
for _, row in self.metadata_df.iterrows():
y = load_from_disk(os.path.join(self.data_dir, row['ids']))
total += len(y)
return total
def get_shape(self):
"""Finds shape of dataset."""
n_tasks = len(self.get_task_names())
for shard_num, (X, y, w, ids) in enumerate(self.itershards()):
if shard_num == 0:
X_shape = np.array(X.shape)
if n_tasks > 0:
y_shape = np.array(y.shape)
w_shape = np.array(w.shape)
else:
y_shape = tuple()
w_shape = tuple()
ids_shape = np.array(ids.shape)
else:
X_shape[0] += np.array(X.shape)[0]
if n_tasks > 0:
y_shape[0] += np.array(y.shape)[0]
w_shape[0] += np.array(w.shape)[0]
ids_shape[0] += np.array(ids.shape)[0]
return tuple(X_shape), tuple(y_shape), tuple(w_shape), tuple(ids_shape)
def get_label_means(self):
"""Return pandas series of label means."""
return self.metadata_df["y_means"]
def get_label_stds(self):
"""Return pandas series of label stds."""
return self.metadata_df["y_stds"]
class ImageDataset(Dataset):
"""A Dataset that loads data from image files on disk."""
def __init__(self, X, y, w=None, ids=None):
"""Create a dataset whose X and/or y array is defined by image files on disk.
Parameters
----------
X: ndarray or list of strings
The dataset's input data. This may be either a single NumPy array directly
containing the data, or a list containing the paths to the image files
y: ndarray or list of strings
The dataset's labels. This may be either a single NumPy array directly
containing the data, or a list containing the paths to the image files
w: ndarray
a 1D or 2D array containing the weights for each sample or sample/task pair
ids: ndarray
the sample IDs
"""
n_samples = len(X)
if y is None:
y = np.zeros((n_samples,))
self._X_shape = self._find_array_shape(X)
self._y_shape = self._find_array_shape(y)
if w is None:
w = np.ones(self._y_shape[:2])
if ids is None:
if not isinstance(X, np.ndarray):
ids = X
elif not isinstance(y, np.ndarray):
ids = y
else:
ids = np.arange(n_samples)
self._X = X
self._y = y
self._w = w
self._ids = np.array(ids, dtype=object)
def _find_array_shape(self, array):
if isinstance(array, np.ndarray):
return array.shape
image_shape = dc.data.ImageLoader.load_img([array[0]]).shape[1:]
return np.concatenate([[len(array)], image_shape])
def __len__(self):
"""
Get the number of elements in the dataset.
"""
return self._X_shape[0]
def get_shape(self):
"""Get the shape of the dataset.
Returns four tuples, giving the shape of the X, y, w, and ids arrays.
"""
return self._X_shape, self._y_shape, self._w.shape, self._ids.shape
def get_task_names(self):
"""Get the names of the tasks associated with this dataset."""
if len(self._y_shape) < 2:
return np.array([0])
return np.arange(self._y_shape[1])
@property
def X(self):
"""Get the X vector for this dataset as a single numpy array."""
if isinstance(self._X, np.ndarray):
return self._X
return dc.data.ImageLoader.load_img(self._X)
@property
def y(self):
"""Get the y vector for this dataset as a single numpy array."""
if isinstance(self._y, np.ndarray):
return self._y
return dc.data.ImageLoader.load_img(self._y)
@property
def ids(self):
"""Get the ids vector for this dataset as a single numpy array."""
return self._ids
@property
def w(self):
"""Get the weight vector for this dataset as a single numpy array."""
return self._w
def iterbatches(self,
batch_size=None,
epoch=0,
deterministic=False,
pad_batches=False):
"""Get an object that iterates over minibatches from the dataset.
Each minibatch is returned as a tuple of four numpy arrays: (X, y, w, ids).
"""
def iterate(dataset, batch_size, deterministic, pad_batches):
n_samples = dataset._X_shape[0]
if not deterministic:
sample_perm = np.random.permutation(n_samples)
else:
sample_perm = np.arange(n_samples)
if batch_size is None:
batch_size = n_samples
batch_idx = 0
num_batches = np.math.ceil(n_samples / batch_size)
while batch_idx < num_batches:
start = batch_idx * batch_size
end = min(n_samples, (batch_idx + 1) * batch_size)
indices = range(start, end)
perm_indices = sample_perm[indices]
if isinstance(dataset._X, np.ndarray):
X_batch = dataset._X[perm_indices]
else:
X_batch = dc.data.ImageLoader.load_img(
[dataset._X[i] for i in perm_indices])
if isinstance(dataset._y, np.ndarray):
y_batch = dataset._y[perm_indices]
else:
y_batch = dc.data.ImageLoader.load_img(
[dataset._y[i] for i in perm_indices])
w_batch = dataset._w[perm_indices]
ids_batch = dataset._ids[perm_indices]
if pad_batches:
(X_batch, y_batch, w_batch, ids_batch) = pad_batch(
batch_size, X_batch, y_batch, w_batch, ids_batch)
batch_idx += 1
yield (X_batch, y_batch, w_batch, ids_batch)
return iterate(self, batch_size, deterministic, pad_batches)
def itersamples(self):
"""Get an object that iterates over the samples in the dataset.
Example:
>>> dataset = NumpyDataset(np.ones((2,2)))
>>> for x, y, w, id in dataset.itersamples():
... print(x.tolist(), y.tolist(), w.tolist(), id)
[1.0, 1.0] [0.0] [0.0] 0
[1.0, 1.0] [0.0] [0.0] 1
"""
def get_image(array, index):
if isinstance(array, np.ndarray):
return array[index]
return dc.data.ImageLoader.load_img([array[index]])[0]
n_samples = self._X_shape[0]
return ((get_image(self._X, i), get_image(self._y, i), self._w[i],
self._ids[i]) for i in range(n_samples))
def transform(self, fn, **args):
"""Construct a new dataset by applying a transformation to every sample in this dataset.
The argument is a function that can be called as follows:
>> newx, newy, neww = fn(x, y, w)
It might be called only once with the whole dataset, or multiple times with
different subsets of the data. Each time it is called, it should transform
the samples and return the transformed data.
Parameters
----------
fn: function
A function to apply to each sample in the dataset
Returns
-------
a newly constructed Dataset object
"""
newx, newy, neww = fn(self.X, self.y, self.w)
return NumpyDataset(newx, newy, neww, self.ids[:])
def select(self, indices, select_dir=None):
"""Creates a new dataset from a selection of indices from self.
TODO(rbharath): select_dir is here due to dc.splits always passing in
splits.
Parameters
----------
indices: list
List of indices to select.
select_dir: string
Ignored.
"""
if isinstance(self._X, np.ndarray):
X = self._X[indices]
else:
X = [self._X[i] for i in indices]
if isinstance(self._y, np.ndarray):
y = self._y[indices]
else:
y = [self._y[i] for i in indices]
w = self._w[indices]
ids = self._ids[indices]
return ImageDataset(X, y, w, ids)
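def _example_image_dataset_usage(image_paths, labels):
  """Illustrative usage sketch only (added; not part of the original module):
  builds an ImageDataset from a hypothetical list of image file paths plus an
  in-memory label array and iterates over it in minibatches of 8."""
  dataset = ImageDataset(image_paths, labels)
  for X_b, y_b, w_b, ids_b in dataset.iterbatches(batch_size=8):
    # Image files are only decoded here, one batch at a time.
    pass
  # select() also works here and returns a new ImageDataset.
  return dataset.select([0, 1])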
class Databag(object):
"""
A utility class to iterate through multiple datasets together.
"""
def __init__(self, datasets=None):
if datasets is None:
self.datasets = dict()
else:
self.datasets = datasets
def add_dataset(self, key, dataset):
self.datasets[key] = dataset
def iterbatches(self, **kwargs):
"""
Loop through all internal datasets in the same order
Parameters
----------
batch_size: int
Number of samples from each dataset to return
    epochs: int
      Number of times to loop through the datasets
    pad_batches: boolean
      Whether every batch should be padded out to exactly batch_size samples
Returns
-------
Generator which yields a dictionary {key: dataset.X[batch]}
"""
key_order = [x for x in self.datasets.keys()]
if "epochs" in kwargs:
epochs = kwargs['epochs']
del kwargs['epochs']
else:
epochs = 1
kwargs['deterministic'] = True
for epoch in range(epochs):
iterators = [self.datasets[x].iterbatches(**kwargs) for x in key_order]
for tup in zip(*iterators):
m_d = {key_order[i]: tup[i][0] for i in range(len(key_order))}
yield m_d
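def _example_databag_usage(dataset_a, dataset_b):
  """Illustrative usage sketch only (added; not part of the original module):
  zips two hypothetical datasets so that corresponding minibatches come out
  together. Note that iterbatches() yields only the X block of each dataset's
  batch, keyed by the name it was registered under."""
  databag = Databag({'a': dataset_a, 'b': dataset_b})
  for batch_dict in databag.iterbatches(batch_size=16, epochs=2):
    x_a = batch_dict['a']
    x_b = batch_dict['b']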
| mit |
MichaelRichardson12/mom_python | plot.py | 1 | 1732 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 31 12:28:40 2017
@author: michael
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import parameters as params
import numpy as np
def plot_mesh(node_coords):
plt.figure(1)
plt.title('Mesh')
plt.xlabel(r'x-coordinates [$\lambda_{0}$]')
plt.ylabel(r'y-coordinates [$\lambda_{0}$]')
plt.plot(node_coords[:,0], node_coords[:,1], '-r')
plt.axis('equal')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,y1 - 2 ,y2 + 2))
plt.grid(True)
plt.show()
def plot_scatter(scattering):
plt.figure(2)
plt.title('2D RCS Real')
plt.xlabel(r'$\phi$ [degrees]')
plt.ylabel(r'$\sigma_{2D}$ [dB]')
plt.plot(params.plot_angs, scattering.real, label='MoM')
plt.grid(True)
plt.show()
def plot_scat_polar(scattering):
plt.figure(3)
ax = plt.subplot(projection='polar')
ax.plot(params.phi_fieldpoints, scattering)
ax.set_rmax(np.max(scattering) + 5)
ax.grid(True)
ax.set_xlabel(r'$\phi$ [degrees]')
# ax.set_ylabel(r'$\sigma_{2D}$ [dB]')
ax.set_title("2D RCS Real", y=1.07)
plt.show()
def plot_multiple_plots(data):
x = np.arange(params.num_quads)
ys = [ii + x + (ii*x)**2 for ii in range(10)]
colors = cm.rainbow(np.linspace(0, 1, len(ys)))
plt.figure(2)
plt.title('2D RCS Real')
plt.xlabel(r'$\phi$ [degrees]')
plt.ylabel(r'$\sigma_{2D}$ [dB]')
plt.grid(True)
for ii in range(0, params.num_quads):
plt.plot(params.plot_angs, data[ii][0].real, color=colors[ii], label="{} Gauss Quad".format(ii))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.show() | mit |
mxjl620/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
SummaLabs/DLS | app/backend-test/core_convertors/run04_test_kerasModel2DLS_PyGraphviz.py | 1 | 1805 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
os.environ['THEANO_FLAGS'] = "device=cpu"
import glob
import json
import networkx as nx
import matplotlib.pyplot as plt
import skimage.io as io
from pprint import pprint
from app.backend.core.models.convertors import keras2dls
import pygraphviz as pyg
#########################################
pathWithDatasets='../../../data-test/test_caffe_models'
pathOutModels='../../../data/network/saved'
lstLayouts=['dot', 'neato']
#########################################
if __name__ == '__main__':
# lstModelsPaths = glob.glob('%s/*-kerasmodel.json' % pathWithDatasets)
# lstModelsPaths = glob.glob('%s/bvlc_alexnet*-kerasmodel.json' % pathWithDatasets)
lstModelsPaths = glob.glob('%s/bvlc_googlenet*-kerasmodel.json' % pathWithDatasets)
pprint(lstModelsPaths)
#
for ii,pp in enumerate(lstModelsPaths):
theFinalDLSModel = keras2dls.convertKeras2DLS(pp, graphvizLayout=None)
#
lstLayers = theFinalDLSModel['layers']
theGraph = pyg.AGraph()
for LL in lstLayers:
tid = LL['id']
for ww in LL['wires']:
theGraph.add_edge(tid, ww)
theGraph.layout(prog='dot', args="-Grankdir=TB")
print(theGraph.nodes())
fout = 'test_pygraph.png'
theGraph.draw(fout)
plt.imshow(io.imread(fout))
plt.show()
#
        # note: the original line referenced an undefined name `ll`; label the
        # output with the 'dot' layout that is actually applied above
        foutModel = os.path.abspath('%s/%s_converted_%s.json' % (pathOutModels, os.path.splitext(os.path.basename(pp))[0], 'dot'))
print ('[%d/%d] convert: %s --> [%s]' % (ii, len(lstModelsPaths), os.path.basename(pp), foutModel))
with open(foutModel, 'w') as f:
f.write(json.dumps(theFinalDLSModel, indent=4))
# nx.draw(theGraph, theGraphPos)
# plt.show()
| mit |
bigdataelephants/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
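# Example (added for illustration, doctest-style):
#     >>> mldata_filename('Whistler Daily Snowfall')
#     'whistler-daily-snowfall'
# i.e. the name is lower-cased, spaces become dashes, and any '(', ')' or '.'
# characters are stripped by the regular expression above.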
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
weegreenblobbie/sd_audio_hackers | 20160626_spectrograms_explained/code/run_spectrogram.py | 1 | 11325 | """
San Diego Audio Hackers
https://github.com/weegreenblobbie/sd_audio_hackers
2016 Nick Hilton
"""
# python
import argparse
import os.path
import sys
assert sys.version_info.major == 2, "python 2 only!"
# third party
import matplotlib.pyplot as plt
import numpy as np
#~from scipy.io.wavfile import read as _wavread # broken!
# Using Nsound to read wavefile since scipy 0.13.3 is broken for stereo, int32 files
import Nsound as ns
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-c',
'--channel',
type = int,
default = None,
        help = 'Selects one channel if the input wave contains multiple channels',
)
parser.add_argument(
'input_wav',
help = 'The input wavfile to process',
)
args = parser.parse_args()
if not os.path.isfile(args.input_wav):
raise RuntimeError("Could not find file: %s" % args.input_wav)
#-------------------------------------------------------------------------
# read wavfile in
sr, x = wavread(args.input_wav)
if x.ndim > 1:
if args.channel is not None:
x = x[:, args.channel]
else:
raise RuntimeError(
                'Input wav has %d channels, use --channel to select one' % x.ndim
)
#-----------------------------------------------------------------------------
# compute spectrogram
cfg = Stft.get_defaults()
cfg['sample_rate'] = sr
stft_op = Stft(**cfg)
data = stft_op(sample_rate = sr, signal = x)
#-------------------------------------------------------------------------
# plot data
time_axis = data['stft_time_axis']
freq_axis = data['stft_freq_axis']
amp = np.abs(data['stft_spec']) ** 0.33
plt.figure()
imagesc(time_axis, freq_axis, amp.T, cmap = 'bone')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Spectrogram: %s' % os.path.basename(args.input_wav))
plt.ylim([freq_axis[0], 5000])
plt.show()
"""
Short time Fourier transform
"""
class Stft:
@staticmethod
def get_defaults():
return dict(
sample_rate = 8000.0,
t_sigma = 0.01,
t_step = 0.01,
f_step = 16.666,
window = 'gaussian',
)
def __init__(self, **kwargs):
"""
Computes the short time fourier transform on input signal.
keyword arguments:
sample_rate : float
The sample rate of the input signal.
t_sigma : float
The standard deviation of the gaussian time-envelope used for
the window.
t_step : float
The time step in seconds between fft samples
f_step : float
The frequency axis step size (nfft & frame_size are derived from this)
window : str
The name of the window to apply to the frame, one of: [
'gaussian', 'rectangular']
"""
sr = kwargs['sample_rate']
t_sigma = kwargs['t_sigma']
t_step = kwargs['t_step']
f_step = kwargs['f_step']
window = kwargs['window']
assert sr > 0, "sample_rate <= 0"
assert t_sigma > 0, "t_sigma <= 0"
assert t_step > 0, "t_step <= 0"
assert f_step > 0, "f_step <= 0"
step = int(np.round(sr * t_step))
#---------------------------------------------------------------------
# compute frame size, nearest power of 2
size_f_step = int(sr / f_step)
size_t_sigma = int(np.round(sr * 6.0 * t_sigma))
frame_size = round_up2(min(size_f_step, size_t_sigma))
nfft = frame_size
#---------------------------------------------------------------------
# setup freq axis
nyquist = sr / 2.0
freq_axis = np.linspace(0, nyquist, nfft / 2 + 1).astype(np.float32)
#---------------------------------------------------------------------
# window
if window == 'gaussian':
t = np.arange(nfft) / float(sr)
mu = np.mean(t)
w = np.exp(-0.5 * ((t - mu) / t_sigma) ** 2.0)
elif window == 'rectangular':
w = np.ones(nfft)
else:
raise ValueError('unknown window type "%s"' % window)
w = w.astype(np.float32)
w /= np.sum(w)
#---------------------------------------------------------------------
# save values into object
self._freq_axis = freq_axis
self._nfft = nfft
self._sample_rate = sr
self._step = step
self._window = w
def __call__(self, **kwargs):
"""
inputs: signal, sample_rate
outputs: stft_spec, stft_freq_axis, stft_time_axis
"""
signal = kwargs['signal']
sample_rate = kwargs['sample_rate']
assert int(self._sample_rate) == int(sample_rate), "sample_rate != %d" % self._sample_rate
assert signal.ndim == 1, "signal must be 1D"
# compute slices of the input signal
sample_slices = compute_sample_slices(
len(signal),
self._nfft,
self._step
)
#---------------------------------------------------------------------
# forward stft
n_time = len(sample_slices)
time_axis = np.zeros(n_time)
spec = np.zeros((n_time, len(self._freq_axis)), np.complex64)
for i in xrange(n_time):
center, s0, s1, pad_l, pad_r = sample_slices[i]
time_axis[i] = center
s = np.array(signal[s0 : s1])
if pad_l > 0:
s = np.hstack([np.zeros((pad_l), np.float32), s])
if pad_r > 0:
s = np.hstack([s, np.zeros((pad_r), np.float32)])
s = s * self._window
spec[i,:] = np.fft.rfft(s)
#---------------------------------------------------------------------
    # convert time axis into seconds
time_axis = time_axis.astype(np.float32) / self._sample_rate
out = dict(
stft_spec = spec,
stft_freq_axis = np.array(self._freq_axis),
stft_time_axis = time_axis,
)
kwargs.update(out)
return kwargs
def round_up2(n):
"""
Rounds up to next power of 2. Returns n if n is already a power of 2.
"""
assert n > 0, "n <= 0"
return int(2 ** np.ceil(np.log(n) / np.log(2)))
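# Illustrative values (added for clarity): round_up2(5) -> 8, round_up2(8) -> 8,
# round_up2(1) -> 1, since 1 == 2**0 is already a power of two.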
def compute_sample_slices(N, frame_size, step):
"""
    Computes start and stop indices and padding.
Returns a list of tuples:
(center_idx, begin_idx, end_idx, pad_left, pad_right),
"""
assert N > 0, 'N <= 0'
assert frame_size > 0, 'frame_size <= 0'
assert step > 0, 'step <= 0'
#-------------------------------------------------------------------------
    # compute center indices for each frame
h_frame = frame_size // 2
centers = []
c_idx = 0
while c_idx < N + h_frame:
centers.append(c_idx)
c_idx += step
#-------------------------------------------------------------------------
    # sample slices, padded where frames run past the ends of the signal
sample_slices = []
for c_idx in centers:
i0 = c_idx - h_frame
i1 = c_idx + h_frame
pad_l = 0
pad_r = 0
if i0 < 0:
pad_l = abs(i0)
i0 = 0
if i1 >= N:
pad_r = i1 - N + 1
i1 = N - 1
sample_slices.append( (c_idx, i0, i1, pad_l, pad_r) )
return sample_slices
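# Worked example (added for clarity, hypothetical numbers):
#     compute_sample_slices(N=10, frame_size=4, step=5)
#     -> [(0, 0, 2, 2, 0), (5, 3, 7, 0, 0), (10, 8, 9, 0, 3)]
# i.e. one frame centred every `step` samples, with pad_left/pad_right giving
# how many zeros the caller must add where a frame runs past either end of
# the signal (this is what Stft.__call__ does above).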
def imagesc(x_axis, y_axis, Z, axes = None, **kwargs):
"""
    Plots the 2D matrix Z using the provided x & y axis for data labels.
    Additional keyword arguments will be passed on to the Matplotlib imshow()
    method.
Parameters:
*x_axis*
The data labels to use for the x axis, must be linear (shape N)
*y_axis*
The data labels to use for the y axis, must be linear (shape M)
*Z*
The data matrix to plot (shape M,N)
*axes*
The matplotlib.axes.Axes to draw on
`**kwargs`
Keyword arguments passed to matplotlib.axes.Axes.imshow()
Returns:
*h*
The graphical handle
Examples:
By default, the origin is in the lower left corner:
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from prospero.plot.imagesc import imagesc
            # Create a data matrix, the Identity with some zeros concatenated
data = np.eye(5)
data = np.hstack( (data, np.zeros((5,1))) )
x_axis = range(6)
y_axis = range(5)
plt.figure()
imagesc(x_axis, y_axis, data)
plt.xlabel("X axis")
plt.ylabel("Y axis")
To change it, pass the matplotlib.axes.Axes.imshow() keyword argument
`origin="upper"`:
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from prospero.plot.imagesc import imagesc
# Create a data matrix, the Idenity with some zeros concatenated
data = np.eye(5)
data = np.hstack( (data, np.zeros((5,1))) )
x_axis = range(6)
y_axis = range(5)
plt.figure()
imagesc(x_axis, y_axis, data, origin = "upper")
plt.xlabel("X axis")
plt.ylabel("Y axis")
"""
# always make a copy so that caller doesn't get a linked colormap by
# accident!
Z = np.array(Z)
if Z.ndim != 2:
raise ValueError("Z should be 2D, not %dD" % Z.ndim)
M, N = Z.shape
if x_axis is None:
x_axis = np.arange(N).astype(np.int32)
if y_axis is None:
y_axis = np.arange(M).astype(np.int32)
# Convert to arrays if lists.
x_axis = np.array(x_axis)
y_axis = np.array(y_axis)
if M != y_axis.size:
raise ValueError("y_axis.size != Z rows (%d != %d)" %(y_axis.size, M))
if N != x_axis.size:
raise ValueError("x_axis.size != Z cols (%d != %d)" %(x_axis.size, N))
# Override these if not set.
kwargs.setdefault('origin', 'lower')
kwargs.setdefault('interpolation', 'nearest')
y_axis = y_axis[::-1]
if kwargs['origin'] == 'lower':
y_axis = y_axis[::-1]
dx = x_axis[1] - x_axis[0]
dy = y_axis[1] - y_axis[0]
extent = \
[
x_axis[0] - 0.5 * dx,
x_axis[-1] + 0.5 * dx,
y_axis[0] - 0.5 * dy,
y_axis[-1] + 0.5 * dy
]
    # Always override this keyword.
kwargs["extent"] = extent
ax = axes
if ax is None:
h = plt.imshow(Z, **kwargs)
ax = plt.gca()
else:
h = ax.imshow(Z, **kwargs)
ax.axis("tight")
return h
def wavread(filename):
import Nsound as ns
a = ns.AudioStream(filename)
sr = a.getSampleRate()
n_channels = a.getNChannels()
n_samples = a.getLength()
x = np.zeros((n_samples, n_channels), np.float32)
for c in range(n_channels):
x[:,c] = a[c].toList()
x = np.squeeze(x) # remove singular dimensions if present
return sr, x
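# Usage sketch (added for clarity; the filename is hypothetical):
#     sr, x = wavread('some_file.wav')
# returns the sample rate and a float32 array of shape (n_samples,) for mono
# input or (n_samples, n_channels) for multichannel input (singleton
# dimensions are squeezed away above).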
if __name__ == "__main__":
main()
| mit |
InnovArul/codesmart | Assignments/Jul-Nov-2017/reinforcement_learning_udemy/rl/monte_carlo_approximation.py | 1 | 1822 | import numpy as np
import matplotlib.pyplot as plt
from iterative_policy_evaluation import print_values, print_policy
from grid import standard_grid, negative_grid
from monte_carlo_random import play_game, random_action, EPS, GAMMA, ALL_POSSIBLE_ACTIONS
LEARNING_RATE = 0.001
if __name__ == '__main__':
grid = standard_grid()
print('rewards')
print_values(grid.rewards, grid)
# define a policy
policy = {
(2, 0) : 'U',
(1, 0) : 'U',
(0, 0) : 'R',
(0, 1) : 'R',
(0, 2) : 'R',
(1, 2) : 'U',
(2, 1) : 'L',
(2, 2) : 'U',
(2, 3) : 'L'
}
theta = np.random.randn(4) / 2
def s2x(s):
return np.array([s[0] - 1, s[1] - 1.5, s[0] * s[1] - 3, 1])
deltas = []
t = 1.0
for it in range(20000):
if it % 100 == 0:
t += 0.01
alpha = LEARNING_RATE / t
biggest_change = 0
states_and_returns = play_game(grid, policy)
seen_states = set() # First-visit MC method
for s, G in states_and_returns:
if s not in seen_states:
old_theta = theta.copy()
# predict the return using parameters
x = s2x(s)
V_hat = theta.dot(x)
theta += alpha * (G - V_hat) * x
biggest_change = max(biggest_change, np.abs(theta - old_theta).sum())
seen_states.add(s)
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
# obtain predicted values for V
V = {}
for s in grid.actions:
V[s] = theta.dot(s2x(s))
print('values')
print_values(V, grid)
print('policy')
print_policy(policy, grid)
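    # Note (added for clarity): the update in the loop above is the gradient
    # Monte Carlo rule for a linear value function V_hat(s) = theta . x(s).
    # After observing a return G from state s it steps along the gradient of
    # the squared error (G - V_hat(s))**2:
    #     theta <- theta + alpha * (G - theta . x(s)) * x(s)
    # which is exactly the `theta += alpha * (G - V_hat) * x` line above.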
| gpl-2.0 |
astocko/statsmodels | statsmodels/datasets/grunfeld/data.py | 24 | 2794 | """Grunfeld (1950) Investment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """This is the Grunfeld (1950) Investment Data.
The source for the data was the original 11-firm data set from Grunfeld's Ph.D.
thesis recreated by Kleiber and Zeileis (2008) "The Grunfeld Data at 50".
The data can be found here.
http://statmath.wu-wien.ac.at/~zeileis/grunfeld/
For a note on the many versions of the Grunfeld data circulating see:
http://www.stanford.edu/~clint/bench/grunfeld.htm
"""
DESCRSHORT = """Grunfeld (1950) Investment Data for 11 U.S. Firms."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 220 (20 years for 11 firms)
Number of variables - 5
Variables name definitions::
invest - Gross investment in 1947 dollars
value - Market value as of Dec. 31 in 1947 dollars
capital - Stock of plant and equipment in 1947 dollars
firm - General Motors, US Steel, General Electric, Chrysler,
Atlantic Refining, IBM, Union Oil, Westinghouse, Goodyear,
Diamond Match, American Steel
year - 1935 - 1954
Note that raw_data has firm expanded to dummy variables, since it is a
string categorical variable.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Loads the Grunfeld data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (ie., there is no reference dummy)
"""
from statsmodels.tools import categorical
data = _get_data()
raw_data = categorical(data, col='firm', drop=True)
ds = du.process_recarray(data, endog_idx=0, stack=False)
ds.raw_data = raw_data
return ds
def load_pandas():
"""
Loads the Grunfeld data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (ie., there is no reference dummy)
"""
from pandas import DataFrame
from statsmodels.tools import categorical
data = _get_data()
raw_data = categorical(data, col='firm', drop=True)
ds = du.process_recarray_pandas(data, endog_idx=0)
ds.raw_data = DataFrame(raw_data)
return ds
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/grunfeld.csv','rb'), delimiter=",",
names=True, dtype="f8,f8,f8,a17,f8")
return data
| bsd-3-clause |
beepee14/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
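# (Clarifying note, added) get_loss above evaluates the scaled logistic
# regression objective that every solver in this benchmark minimises:
#     mean_i log(1 + exp(-y_i * (x_i . w + intercept))) + ||w||^2 / (2 * C * n_samples)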
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
cwu2011/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
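# Minimal illustration (added; the literal inputs are hypothetical) of how the
# two small transformers above behave on toy data:
#     ItemSelector(key='body').transform({'subject': ['hi'], 'body': ['some text']})
#         -> ['some text']
#     TextStats().transform(['Short body. Two sentences.'])
#         -> [{'length': 26, 'num_sentences': 2}]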
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
kjung/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
bavardage/statsmodels | statsmodels/tsa/vector_ar/tests/test_var.py | 3 | 16672 | """
Test VAR Model
"""
from __future__ import with_statement
# pylint: disable=W0612,W0231
from cStringIO import StringIO
from nose.tools import assert_raises
import nose
import os
import sys
import numpy as np
import statsmodels.api as sm
import statsmodels.tsa.vector_ar.var_model as model
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tools.data as data_util
from statsmodels.tsa.vector_ar.var_model import VAR
from statsmodels.compatnp.py3k import BytesIO
from numpy.testing import assert_almost_equal, assert_equal, assert_
DECIMAL_12 = 12
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
class CheckVAR(object):
# just so pylint won't complain
res1 = None
res2 = None
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)
def test_neqs(self):
assert_equal(self.res1.neqs, self.res2.neqs)
def test_nobs(self):
assert_equal(self.res1.avobs, self.res2.nobs)
def test_df_eq(self):
assert_equal(self.res1.df_eq, self.res2.df_eq)
def test_rmse(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].mse_resid**.5,
eval('self.res2.rmse_'+str(i+1)), DECIMAL_6)
def test_rsquared(self):
results = self.res1.results
for i in range(len(results)):
assert_almost_equal(results[i].rsquared,
eval('self.res2.rsquared_'+str(i+1)), DECIMAL_3)
def test_llf(self):
results = self.res1.results
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)
for i in range(len(results)):
assert_almost_equal(results[i].llf,
eval('self.res2.llf_'+str(i+1)), DECIMAL_2)
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic)
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic)
def test_hqic(self):
assert_almost_equal(self.res1.hqic, self.res2.hqic)
def test_fpe(self):
assert_almost_equal(self.res1.fpe, self.res2.fpe)
def test_detsig(self):
assert_almost_equal(self.res1.detomega, self.res2.detsig)
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def get_macrodata():
data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]
names = data.dtype.names
nd = data.view((float,3))
nd = np.diff(np.log(nd), axis=0)
return nd.ravel().view(data.dtype)
def generate_var():
from rpy2.robjects import r
import pandas.rpy.common as prp
r.source('tests/var.R')
return prp.convert_robj(r['result'], use_pandas=False)
def write_generate_var():
result = generate_var()
np.savez('tests/results/vars_results.npz', **result)
class RResults(object):
"""
Simple interface with results generated by "vars" package in R.
"""
def __init__(self):
#data = np.load(resultspath + 'vars_results.npz')
from results.results_var_data import var_results
data = var_results.__dict__
self.names = data['coefs'].dtype.names
self.params = data['coefs'].view((float, len(self.names)))
self.stderr = data['stderr'].view((float, len(self.names)))
self.irf = data['irf'].item()
self.orth_irf = data['orthirf'].item()
self.nirfs = int(data['nirfs'][0])
self.nobs = int(data['obs'][0])
self.totobs = int(data['totobs'][0])
crit = data['crit'].item()
self.aic = crit['aic'][0]
self.sic = self.bic = crit['sic'][0]
self.hqic = crit['hqic'][0]
self.fpe = crit['fpe'][0]
self.detomega = data['detomega'][0]
self.loglike = data['loglike'][0]
self.nahead = int(data['nahead'][0])
self.ma_rep = data['phis']
self.causality = data['causality']
def close_plots():
try:
import matplotlib.pyplot as plt
plt.close('all')
except ImportError:
pass
_orig_stdout = None
def setup_module():
global _orig_stdout
_orig_stdout = sys.stdout
sys.stdout = StringIO()
def teardown_module():
sys.stdout = _orig_stdout
close_plots()
def have_matplotlib():
try:
import matplotlib
if matplotlib.__version__ < '1':
raise
return True
except:
return False
class CheckIRF(object):
ref = None; res = None; irf = None
k = None
#---------------------------------------------------------------------------
# IRF tests
def test_irf_coefs(self):
self._check_irfs(self.irf.irfs, self.ref.irf)
self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)
def _check_irfs(self, py_irfs, r_irfs):
for i, name in enumerate(self.res.names):
ref_irfs = r_irfs[name].view((float, self.k))
res_irfs = py_irfs[:, :, i]
assert_almost_equal(ref_irfs, res_irfs)
def test_plot_irf(self):
if not have_matplotlib():
raise nose.SkipTest
self.irf.plot()
self.irf.plot(plot_stderr=False)
self.irf.plot(impulse=0, response=1)
self.irf.plot(impulse=0)
self.irf.plot(response=0)
self.irf.plot(orth=True)
self.irf.plot(impulse=0, response=1, orth=True)
close_plots()
def test_plot_cum_effects(self):
if not have_matplotlib():
raise nose.SkipTest
self.irf.plot_cum_effects()
self.irf.plot_cum_effects(plot_stderr=False)
self.irf.plot_cum_effects(impulse=0, response=1)
self.irf.plot_cum_effects(orth=True)
self.irf.plot_cum_effects(impulse=0, response=1, orth=True)
close_plots()
class CheckFEVD(object):
fevd = None
#---------------------------------------------------------------------------
# FEVD tests
def test_fevd_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.fevd.plot()
close_plots()
def test_fevd_repr(self):
print self.fevd
def test_fevd_summary(self):
self.fevd.summary()
def test_fevd_cov(self):
# test does not crash
# not implemented
# covs = self.fevd.cov()
pass
class TestVARResults(CheckIRF, CheckFEVD):
@classmethod
def setupClass(cls):
cls.p = 2
cls.data = get_macrodata()
cls.model = VAR(cls.data)
cls.names = cls.model.names
cls.ref = RResults()
cls.k = len(cls.ref.names)
cls.res = cls.model.fit(maxlags=cls.p)
cls.irf = cls.res.irf(cls.ref.nirfs)
cls.nahead = cls.ref.nahead
cls.fevd = cls.res.fevd()
def test_constructor(self):
# make sure this works with no names
ndarr = self.data.view((float, 3))
model = VAR(ndarr)
res = model.fit(self.p)
def test_names(self):
assert_equal(self.model.names, self.ref.names)
model2 = VAR(self.data, names=self.names)
assert_equal(model2.names, self.ref.names)
def test_get_eq_index(self):
assert(isinstance(self.res.names, list))
for i, name in enumerate(self.names):
idx = self.res.get_eq_index(i)
idx2 = self.res.get_eq_index(name)
assert_equal(idx, i)
assert_equal(idx, idx2)
assert_raises(Exception, self.res.get_eq_index, 'foo')
def test_repr(self):
# just want this to work
foo = str(self.res)
bar = repr(self.res)
def test_params(self):
assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
def test_cov_params(self):
# do nothing for now
self.res.cov_params
def test_cov_ybar(self):
self.res.cov_ybar()
def test_tstat(self):
self.res.tvalues
def test_pvalues(self):
self.res.pvalues
def test_summary(self):
summ = self.res.summary()
print summ
def test_detsig(self):
assert_almost_equal(self.res.detomega, self.ref.detomega)
def test_aic(self):
assert_almost_equal(self.res.aic, self.ref.aic)
def test_bic(self):
assert_almost_equal(self.res.bic, self.ref.bic)
def test_hqic(self):
assert_almost_equal(self.res.hqic, self.ref.hqic)
def test_fpe(self):
assert_almost_equal(self.res.fpe, self.ref.fpe)
def test_lagorder_select(self):
ics = ['aic', 'fpe', 'hqic', 'bic']
for ic in ics:
res = self.model.fit(maxlags=10, ic=ic, verbose=True)
assert_raises(Exception, self.model.fit, ic='foo')
def test_nobs(self):
assert_equal(self.res.nobs, self.ref.nobs)
def test_stderr(self):
assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)
def test_loglike(self):
assert_almost_equal(self.res.llf, self.ref.loglike)
def test_ma_rep(self):
ma_rep = self.res.ma_rep(self.nahead)
assert_almost_equal(ma_rep, self.ref.ma_rep)
#--------------------------------------------------
# Smoke tests to make sure these calls run; correctness still needs to be checked
def test_causality(self):
causedby = self.ref.causality['causedby']
for i, name in enumerate(self.names):
variables = self.names[:i] + self.names[i + 1:]
result = self.res.test_causality(name, variables, kind='f')
assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)
rng = list(range(self.k))
rng.remove(i)
result2 = self.res.test_causality(i, rng, kind='f')
assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)
# make sure works
result = self.res.test_causality(name, variables, kind='wald')
# corner cases
_ = self.res.test_causality(self.names[0], self.names[1])
_ = self.res.test_causality(0, 1)
assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')
def test_select_order(self):
result = self.model.fit(10, ic='aic', verbose=True)
result = self.model.fit(10, ic='fpe', verbose=True)
# regression check: this previously triggered a bug
model = VAR(self.model.endog)
model.select_order()
def test_is_stable(self):
# may not necessarily be true for other datasets
assert(self.res.is_stable(verbose=True))
def test_acf(self):
# test that it works...for now
acfs = self.res.acf(10)
# defaults to nlags=lag_order
acfs = self.res.acf()
assert(len(acfs) == self.p + 1)
def test_acorr(self):
acorrs = self.res.acorr(10)
def test_forecast(self):
point = self.res.forecast(self.res.y[-5:], 5)
def test_forecast_interval(self):
y = self.res.y[:-self.p:]
point, lower, upper = self.res.forecast_interval(y, 5)
def test_plot_sim(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plotsim(steps=100)
close_plots()
def test_plot(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot()
close_plots()
def test_plot_acorr(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_acorr()
close_plots()
def test_plot_forecast(self):
if not have_matplotlib():
raise nose.SkipTest
self.res.plot_forecast(5)
close_plots()
def test_reorder(self):
#manually reorder
data = self.data.view((float,3))
names = self.names
data2 = np.append(np.append(data[:, 2, None], data[:, 0, None], axis=1),
                  data[:, 1, None], axis=1)
names2 = [names[2], names[0], names[1]]
res2 = VAR(data2,names=names2).fit(maxlags=self.p)
#use reorder function
res3 = self.res.reorder(['realinv','realgdp', 'realcons'])
#check if the main results match
assert_almost_equal(res2.params, res3.params)
assert_almost_equal(res2.sigma_u, res3.sigma_u)
assert_almost_equal(res2.bic, res3.bic)
assert_almost_equal(res2.stderr, res3.stderr)
def test_pickle(self):
from statsmodels.compatnp.py3k import BytesIO
fh = BytesIO()
#test wrapped results load save pickle
self.res.save(fh)
fh.seek(0,0)
res_unpickled = self.res.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res))
class E1_Results(object):
"""
Results from Lutkepohl (2005) using E2 dataset
"""
def __init__(self):
# Lutkepohl p. 120 results
# I asked the author about these results; there is probably a rounding
# error in the book, so I adjusted these test results to match the
# (double-checked) Python calculations
self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.129, 0.547, 0.663],
[0.032, 0.134, 0.163],
[0.026, 0.108, 0.131]],
[[0.084, .385, .479],
[.016, .079, .095],
[.016, .078, .103]]])
self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],
[0.032, 0.139, 0.169],
[0.026, 0.112, 0.136]],
[[0.149, 0.631, 0.764],
[0.044, 0.185, 0.224],
[0.033, 0.140, 0.169]],
[[0.099, .468, .555],
[.038, .170, .205],
[.033, .150, .185]]])
self.lr_stderr = np.array([[.134, .645, .808],
[.048, .230, .288],
[.043, .208, .260]])
basepath = os.path.split(sm.__file__)[0]
resultspath = basepath + '/tsa/vector_ar/tests/results/'
def get_lutkepohl_data(name='e2'):
lut_data = basepath + '/tsa/vector_ar/data/'
path = lut_data + '%s.dat' % name
return util.parse_lutkepohl_data(path)
def test_lutkepohl_parse():
files = ['e%d' % i for i in range(1, 7)]
for f in files:
get_lutkepohl_data(f)
class TestVARResultsLutkepohl(object):
"""
Verify calculations using results from Lutkepohl's book
"""
def __init__(self):
self.p = 2
sdata, dates = get_lutkepohl_data('e1')
names = sdata.dtype.names
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
self.model = VAR(adj_data[:-16], dates=dates[1:-16], names=names,
freq='Q')
self.res = self.model.fit(maxlags=self.p)
self.irf = self.res.irf(10)
self.lut = E1_Results()
def test_approx_mse(self):
# 3.5.18, p. 99
mse2 = np.array([[25.12, .580, 1.300],
[.580, 1.581, .586],
[1.300, .586, 1.009]]) * 1e-4
assert_almost_equal(mse2, self.res.forecast_cov(3)[1],
DECIMAL_3)
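# Reference sketch (a standard VAR result, stated here as an assumption about
# what forecast_cov computes, not read from its source): the approximate
# h-step forecast MSE matrix is
#     Sigma_yhat(h) = sum_{i=0}^{h-1} Phi_i * Sigma_u * Phi_i'
# where Phi_i are the MA coefficient matrices and Sigma_u is the residual
# covariance; forecast_cov(3)[1] above is apparently the h=2 matrix.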
def test_irf_stderr(self):
irf_stderr = self.irf.stderr(orth=False)
for i in range(1, 1 + len(self.lut.irf_stderr)):
assert_almost_equal(np.round(irf_stderr[i], 3),
self.lut.irf_stderr[i-1])
def test_cum_irf_stderr(self):
stderr = self.irf.cum_effect_stderr(orth=False)
for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
assert_almost_equal(np.round(stderr[i], 3),
self.lut.cum_irf_stderr[i-1])
def test_lr_effect_stderr(self):
stderr = self.irf.lr_effect_stderr(orth=False)
orth_stderr = self.irf.lr_effect_stderr(orth=True)
assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
def test_get_trendorder():
results = {
'c' : 1,
'nc' : 0,
'ct' : 2,
'ctt' : 3
}
for t, trendorder in results.items():
assert(util.get_trendorder(t) == trendorder)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
tompollard/tableone | test_tableone.py | 1 | 50834 | import random
import warnings
from nose.tools import (with_setup, assert_raises, assert_equal,
assert_almost_equal, assert_list_equal,
assert_count_equal)
import numpy as np
import pandas as pd
from scipy import stats
import tableone
from tableone import TableOne, load_dataset
from tableone.tableone import InputError
from tableone.modality import hartigan_diptest, generate_data
def mytest(*args):
"""
Custom hypothesis test used by test_custom_statistical_tests; returns the
two-sample Kolmogorov-Smirnov p-value.
"""
mytest.__name__ = "Test name"
_, pval = stats.ks_2samp(*args)
return pval
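# Usage sketch (inferred from test_custom_statistical_tests below, not from
# the package documentation): a custom test is any callable that receives the
# per-group samples as positional arguments and returns a p-value, wired in
# via the `htest` mapping, e.g.
#     TableOne(df, columns=["val"], groupby="rvs", pval=True,
#              htest={"val": mytest})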
class TestTableOne(object):
"""
Tests for TableOne
"""
def setup(self):
"""
set up test fixtures
"""
seed = 12345
np.random.seed(seed)
self.data_pn = load_dataset('pn2012')
self.data_sample = self.create_sample_dataset(n=10000)
self.data_small = self.create_small_dataset()
self.data_groups = self.create_another_dataset(n=20)
self.data_categorical = self.create_categorical_dataset()
self.data_mixed = self.create_mixed_datatypes_dataset()
def create_sample_dataset(self, n):
"""
create sample dataset
"""
data_sample = pd.DataFrame(index=range(n))
mu, sigma = 10, 1
data_sample['normal'] = np.random.normal(mu, sigma, n)
data_sample['nonnormal'] = np.random.noncentral_chisquare(20, nonc=2,
size=n)
bears = ['Winnie', 'Paddington', 'Baloo', 'Blossom']
data_sample['bear'] = np.random.choice(bears, n,
p=[0.5, 0.1, 0.1, 0.3])
data_sample['likeshoney'] = np.nan
data_sample.loc[data_sample['bear'] == 'Winnie', 'likeshoney'] = 1
data_sample.loc[data_sample['bear'] == 'Baloo', 'likeshoney'] = 1
data_sample['likesmarmalade'] = 0
data_sample.loc[data_sample['bear'] == 'Paddington',
'likesmarmalade'] = 1
data_sample['height'] = 0
data_sample.loc[data_sample['bear'] == 'Winnie', 'height'] = 6
data_sample.loc[data_sample['bear'] == 'Paddington', 'height'] = 4
data_sample.loc[data_sample['bear'] == 'Baloo', 'height'] = 20
data_sample.loc[data_sample['bear'] == 'Blossom', 'height'] = 7
data_sample['fictional'] = 0
data_sample.loc[data_sample['bear'] == 'Winnie', 'fictional'] = 1
data_sample.loc[data_sample['bear'] == 'Paddington', 'fictional'] = 1
data_sample.loc[data_sample['bear'] == 'Baloo', 'fictional'] = 1
data_sample.loc[data_sample['bear'] == 'Blossom', 'fictional'] = 1
return data_sample
def create_small_dataset(self):
"""
create small dataset
"""
data_small = pd.DataFrame(index=range(10))
data_small['group1'] = 0
data_small.loc[0:4, 'group1'] = 1
data_small['group2'] = 0
data_small.loc[2:7, 'group2'] = 1
data_small['group3'] = 0
data_small.loc[1:2, 'group3'] = 1
data_small.loc[3:7, 'group3'] = 2
return data_small
def create_another_dataset(self, n):
"""
create another dataset
"""
data_groups = pd.DataFrame(index=range(n))
data_groups['group'] = 'group1'
data_groups.loc[2:6, 'group'] = 'group2'
data_groups.loc[6:12, 'group'] = 'group3'
data_groups.loc[12: n, 'group'] = 'group4'
data_groups['age'] = range(n)
data_groups['weight'] = [x+100 for x in range(n)]
return data_groups
def create_categorical_dataset(self, n_cat=100, n_obs_per_cat=1000,
n_col=10):
"""
create a dataframe with many categories of many levels
"""
# dataframe with many categories of many levels
# generate integers to represent data
data = np.arange(n_cat*n_obs_per_cat*n_col)
# use modulus to create categories - unique for each column
data = np.mod(data, n_cat*n_col)
# reshape intro a matrix
data = data.reshape(n_cat*n_obs_per_cat, n_col)
return pd.DataFrame(data)
def create_mixed_datatypes_dataset(self, n=20):
"""
create a dataframe with mixed datatypes in the same column
"""
data_mixed = pd.DataFrame(index=range(n))
data_mixed['string data'] = 'a'
mu, sigma = 50, 5
data_mixed['mixed numeric data'] = np.random.normal(mu, sigma, n)
data_mixed.loc[1, 'mixed numeric data'] = 'could not measure'
return data_mixed
def teardown(self):
"""
tear down test fixtures
"""
pass
@with_setup(setup, teardown)
def test_hello_travis(self):
x = 'hello'
y = 'travis'
assert x != y
@with_setup(setup, teardown)
def test_examples_used_in_the_readme_run_without_raising_error_pn(self):
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
categorical = ['ICU', 'death']
groupby = ['death']
nonnormal = ['Age']
mytable = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False)
@with_setup(setup, teardown)
def test_robust_to_duplicates_in_input_df_index(self):
d_control = pd.DataFrame(data={'group': [0, 0, 0, 0, 0, 0, 0],
'value': [3, 4, 4, 4, 4, 4, 5]})
d_case = pd.DataFrame(data={'group': [1, 1, 1], 'value': [1, 2, 3]})
d = pd.concat([d_case, d_control])
with assert_raises(InputError):
t = TableOne(d, ['value'], groupby='group', pval=True)
d_idx_reset = pd.concat([d_case, d_control], ignore_index=True)
t2 = TableOne(d_idx_reset, ['value'], groupby='group', pval=True)
header = "Grouped by group"
mean_std_0 = t2.tableone[header].at[("value, mean (SD)", ""), "0"]
mean_std_1 = t2.tableone[header].at[("value, mean (SD)", ""), "1"]
assert mean_std_0 == '4.0 (0.6)'
assert mean_std_1 == '2.0 (1.0)'
@with_setup(setup, teardown)
def test_overall_mean_and_std_as_expected_for_cont_variable(self):
columns = ['normal', 'nonnormal', 'height']
table = TableOne(self.data_sample, columns=columns)
mean = table.cont_describe.loc['normal']['mean']['Overall']
std = table.cont_describe.loc['normal']['std']['Overall']
print(self.data_sample.mean())
print(self.data_sample.std())
assert abs(mean-self.data_sample.normal.mean()) <= 0.02
assert abs(std-self.data_sample.normal.std()) <= 0.02
@with_setup(setup, teardown)
def test_overall_n_and_percent_as_expected_for_binary_cat_variable(self):
categorical = ['likesmarmalade']
table = TableOne(self.data_sample, columns=categorical,
categorical=categorical)
lm = table.cat_describe.loc['likesmarmalade']
notlikefreq = float(lm.loc['0', 'freq'].values[0])
notlikepercent = float(lm.loc['0', 'percent'].values[0])
likefreq = float(lm.loc['1', 'freq'].values[0])
likepercent = float(lm.loc['1', 'percent'].values[0])
assert notlikefreq + likefreq == 10000
assert abs(100 - notlikepercent - likepercent) <= 0.02
assert notlikefreq == 8977
assert likefreq == 1023
@with_setup(setup, teardown)
def test_overall_n_and_percent_for_binary_cat_var_with_nan(self):
"""
Ignore NaNs when counting the number of values and the overall
percentage
"""
categorical = ['likeshoney']
table = TableOne(self.data_sample, columns=categorical,
categorical=categorical)
lh = table.cat_describe.loc['likeshoney']
likefreq = float(lh.loc['1.0', 'freq'].values[0])
likepercent = float(lh.loc['1.0', 'percent'].values[0])
assert likefreq == 5993
assert abs(100-likepercent) <= 0.01
@with_setup(setup, teardown)
def test_with_data_as_only_input_argument(self):
"""
Test with a simple dataset that a table generated with no pre-specified
columns returns the same results as a table generated with specified
columns
"""
table_no_args = TableOne(self.data_groups)
columns = ['group', 'age', 'weight']
categorical = ['group']
table_with_args = TableOne(self.data_groups, columns=columns,
categorical=categorical)
assert table_no_args._columns == table_with_args._columns
assert table_no_args._categorical == table_with_args._categorical
assert (table_no_args.tableone.columns ==
table_with_args.tableone.columns).all()
assert (table_no_args.tableone['Overall'].values ==
table_with_args.tableone['Overall'].values).all()
assert (table_no_args.tableone == table_with_args.tableone).all().all()
@with_setup(setup, teardown)
def test_fisher_exact_for_small_cell_count(self):
"""
Ensure that the package runs Fisher's exact test when cell counts
are <= 5 and the contingency table is 2x2
"""
categorical = ['group1', 'group3']
table = TableOne(self.data_small, categorical=categorical,
groupby='group2', pval=True)
# group1 vs group2 forms a 2x2 table, so Fisher's exact test is used
# group3 vs group2 forms a 3x2 table, so it falls back to chi-squared
assert (table._htest_table.loc['group1', 'Test'] == "Fisher's exact")
assert (table._htest_table.loc['group3', 'Test'] ==
'Chi-squared (warning: expected count < 5)')
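# Illustrative sketch (not part of the original test): the same rule can be
# reproduced directly with scipy on a small 2x2 table, e.g.
#     from scipy.stats import fisher_exact, chi2_contingency
#     oddsratio, p_fisher = fisher_exact([[1, 4], [4, 1]])
#     chi2, p_chi2, dof, expected = chi2_contingency([[1, 4], [4, 1]])
# Fisher's exact test remains valid at these counts, whereas the chi-squared
# approximation is unreliable when expected counts fall below 5 (hence the
# warning string asserted above).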
@with_setup(setup, teardown)
def test_sequence_of_cont_table(self):
"""
Ensure that the columns align with the values
"""
columns = ['age', 'weight']
categorical = []
groupby = 'group'
t = TableOne(self.data_groups, columns=columns,
categorical=categorical, groupby=groupby,
missing=False, decimals=2, label_suffix=False,
overall=False)
# n and weight rows are already ordered, so sorting should
# not change the order
assert (t.tableone.loc['n'].values[0].astype(float) ==
sorted(t.tableone.loc['n'].values[0].astype(float))).all()
assert (t.tableone.loc['age'].values[0] ==
['0.50 (0.71)', '3.50 (1.29)', '8.50 (1.87)',
'15.50 (2.45)']).all()
@with_setup(setup, teardown)
def test_categorical_cell_count(self):
"""
Check the categorical cell counts are correct
"""
categorical = list(np.arange(10))
table = TableOne(self.data_categorical, columns=categorical,
categorical=categorical)
df = table.cat_describe
# drop 'overall' level of column index
df.columns = df.columns.droplevel(level=1)
# each column
for i in np.arange(10):
# each category should have 100 levels
assert df.loc[i].shape[0] == 100
@with_setup(setup, teardown)
def test_hartigan_diptest_for_modality(self):
"""
Ensure that Hartigan's dip test distinguishes unimodal from multimodal
data: a large p-value is consistent with unimodality, while a small
p-value rejects it for the two- and three-peak samples
"""
dist_1_peak = generate_data(peaks=1, n=[10000])
t1 = hartigan_diptest(dist_1_peak)
assert t1 > 0.95
dist_2_peak = generate_data(peaks=2, n=[10000, 10000])
t2 = hartigan_diptest(dist_2_peak)
assert t2 < 0.05
dist_3_peak = generate_data(peaks=3, n=[10000, 10000, 10000])
t3 = hartigan_diptest(dist_3_peak)
assert t3 < 0.05
@with_setup(setup, teardown)
def test_limit_of_categorical_data_pn(self):
"""
Tests the `limit` keyword arg, which limits the number of categories
presented
"""
data_pn = self.data_pn.copy()
# 6 categories of age based on decade
data_pn['age_group'] = data_pn['Age'].map(lambda x: int(x/10))
# limit
columns = ['age_group', 'Age', 'SysABP', 'Height', 'Weight', 'ICU',
'death']
categorical = ['age_group', 'ICU', 'death']
# test it limits to 3
table = TableOne(data_pn, columns=columns, categorical=categorical,
limit=3, label_suffix=False)
assert table.tableone.loc['age_group', :].shape[0] == 3
# test other categories are not affected if limit > num categories
assert table.tableone.loc['death', :].shape[0] == 2
def test_input_data_not_modified(self):
"""
Check the input dataframe is not modified by the package
"""
df_orig = self.data_groups.copy()
# turn off warnings for this test
# warnings.simplefilter("ignore")
# no input arguments
df_no_args = self.data_groups.copy()
table_no_args = TableOne(df_no_args)
assert (df_no_args['group'] == df_orig['group']).all()
# groupby
df_groupby = self.data_groups.copy()
table_groupby = TableOne(df_groupby,
columns=['group', 'age', 'weight'],
categorical=['group'], groupby=['group'])
assert (df_groupby['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# sorted
df_sorted = self.data_groups.copy()
table_sorted = TableOne(df_sorted, columns=['group', 'age', 'weight'],
categorical=['group'], groupby=['group'],
sort=True)
assert (df_sorted['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# pval
df_pval = self.data_groups.copy()
table_pval = TableOne(df_pval, columns=['group', 'age', 'weight'],
categorical=['group'], groupby=['group'],
sort=True, pval=True)
assert (df_pval['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# pval_adjust
df_pval_adjust = self.data_groups.copy()
table_pval_adjust = TableOne(df_pval_adjust,
columns=['group', 'age', 'weight'],
categorical=['group'],
groupby=['group'], sort=True, pval=True,
pval_adjust='bonferroni')
assert (df_pval_adjust['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# labels
df_labels = self.data_groups.copy()
table_labels = TableOne(df_labels,
columns=['group', 'age', 'weight'],
categorical=['group'], groupby=['group'],
rename={'age': 'age, years'})
assert (df_labels['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# limit
df_limit = self.data_groups.copy()
table_limit = TableOne(df_limit,
columns=['group', 'age', 'weight'],
categorical=['group'], groupby=['group'],
limit=2)
assert (df_limit['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# nonnormal
df_nonnormal = self.data_groups.copy()
table_nonnormal = TableOne(df_nonnormal,
columns=['group', 'age', 'weight'],
categorical=['group'], groupby=['group'],
nonnormal=['age'])
assert (df_nonnormal['group'] == df_orig['group']).all()
assert (df_groupby['age'] == df_orig['age']).all()
assert (df_groupby['weight'] == df_orig['weight']).all()
# warnings.simplefilter("default")
@with_setup(setup, teardown)
def test_groupby_with_group_named_isnull_pn(self):
"""
Test case with a group having the same name as a column in TableOne
"""
df = self.data_pn.copy()
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU']
groupby = 'ICU'
group_levels = df[groupby].unique()
# collect the possible column names
table = TableOne(df, columns=columns, groupby=groupby, pval=True)
tableone_columns = list(table.tableone.columns.levels[1])
table = TableOne(df, columns=columns, groupby=groupby, pval=True,
pval_adjust='b')
tableone_columns = (tableone_columns +
list(table.tableone.columns.levels[1]))
tableone_columns = np.unique(tableone_columns)
tableone_columns = [c for c in tableone_columns
if c not in group_levels]
for c in tableone_columns:
# for each output column name in tableone, try them as a group
df.loc[0:20, 'ICU'] = c
if 'adjust' in c:
pval_adjust = 'b'
else:
pval_adjust = None
with assert_raises(InputError):
table = TableOne(df, columns=columns, groupby=groupby,
pval=True, pval_adjust=pval_adjust)
@with_setup(setup, teardown)
def test_label_dictionary_input_pn(self):
"""
Test columns and rows are relabelled with the label argument
"""
df = self.data_pn.copy()
columns = ['Age', 'ICU', 'death']
categorical = ['death', 'ICU']
groupby = 'death'
labels = {'death': 'mortality', 'Age': 'Age, years',
'ICU': 'Intensive Care Unit'}
table = TableOne(df, columns=columns, categorical=categorical,
groupby=groupby, rename=labels, label_suffix=False)
# check the header column is updated (groupby variable)
assert table.tableone.columns.levels[0][0] == 'Grouped by mortality'
# check the categorical rows are updated
assert 'Intensive Care Unit' in table.tableone.index.levels[0]
# check the continuous rows are updated
assert 'Age, years' in table.tableone.index.levels[0]
@with_setup(setup, teardown)
def test_tableone_row_sort_pn(self):
"""
Test sort functionality of TableOne
"""
df = self.data_pn.copy()
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
table = TableOne(df, columns=columns, label_suffix=False)
# a call to .index.levels[0] automatically sorts the levels
# instead, call values and use pd.unique as it preserves order
tableone_rows = pd.unique([x[0] for x in table.tableone.index.values])
# default should not sort
for i, c in enumerate(columns):
# i+1 because we skip the first row, 'n'
assert tableone_rows[i+1] == c
table = TableOne(df, columns=columns, sort=True, label_suffix=False)
tableone_rows = pd.unique([x[0] for x in table.tableone.index.values])
for i, c in enumerate(sorted(columns, key=lambda s: s.lower())):
# i+1 because we skip the first row, 'n'
assert tableone_rows[i+1] == c
@with_setup(setup, teardown)
def test_string_data_as_continuous_error(self):
"""
Test raising an error when continuous columns contain non-numeric data
"""
try:
# treating the mixed-type column as continuous should raise an InputError
table = TableOne(self.data_mixed, categorical=[])
except InputError as e:
starts_str = "The following continuous column(s) have"
assert e.args[0].startswith(starts_str)
except:
# unexpected error - raise it
raise
@with_setup(setup, teardown)
def test_tableone_columns_in_consistent_order_pn(self):
"""
Test output columns in TableOne are always in the same order
"""
df = self.data_pn.copy()
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
categorical = ['ICU', 'death']
groupby = ['death']
table = TableOne(df, columns=columns, groupby=groupby, pval=True,
htest_name=True, overall=False)
assert table.tableone.columns.levels[1][0] == 'Missing'
assert table.tableone.columns.levels[1][-1] == 'Test'
assert table.tableone.columns.levels[1][-2] == 'P-Value'
df.loc[df['death'] == 0, 'death'] = 2
# without overall column
table = TableOne(df, columns=columns, groupby=groupby, pval=True,
pval_adjust='bonferroni', htest_name=True,
overall=False)
assert table.tableone.columns.levels[1][0] == 'Missing'
assert table.tableone.columns.levels[1][-1] == 'Test'
assert table.tableone.columns.levels[1][-2] == 'P-Value (adjusted)'
# with overall column
table = TableOne(df, columns=columns, groupby=groupby, pval=True,
pval_adjust='bonferroni', htest_name=True,
overall=True)
assert table.tableone.columns.levels[1][0] == 'Missing'
assert table.tableone.columns.levels[1][1] == 'Overall'
assert table.tableone.columns.levels[1][-1] == 'Test'
assert table.tableone.columns.levels[1][-2] == 'P-Value (adjusted)'
@with_setup(setup, teardown)
def test_check_null_counts_are_correct_pn(self):
"""
Test that the isnull column is correctly reporting number of nulls
"""
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
categorical = ['ICU', 'death']
groupby = ['death']
# test when not grouping
table = TableOne(self.data_pn, columns=columns,
categorical=categorical)
# get isnull column only
isnull = table.tableone.iloc[:, 0]
for i, v in enumerate(isnull):
# skip empty rows by checking value is not a string
if 'float' in str(type(v)):
# check each null count is correct
col = isnull.index[i][0]
assert self.data_pn[col].isnull().sum() == v
# test when grouping by a variable
grouped_table = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby)
# get isnull column only
isnull = grouped_table.tableone.iloc[:, 0]
for i, v in enumerate(isnull):
# skip empty rows by checking value is not a string
if 'float' in str(type(v)):
# check each null count is correct
col = isnull.index[i][0]
assert self.data_pn[col].isnull().sum() == v
# @with_setup(setup, teardown)
# def test_binary_columns_are_not_converted_to_true_false(self):
# """
# Fix issue where 0 and 1 were being converted to False and True
# when set as categorical variables.
# """
# df = pd.DataFrame({'Feature': [True,True,False,True,False,False,
# True,False,False,True],
# 'ID': [1,1,0,0,1,1,0,0,1,0],
# 'Stuff1': [23,54,45,38,32,59,37,76,32,23],
# 'Stuff2': [12,12,67,29,24,39,32,65,12,15]})
# t = TableOne(df, columns=['Feature','ID'], categorical=['Feature',
# 'ID'])
# # not boolean
# assert type(t.tableone.loc['ID'].index[0]) != bool
# assert type(t.tableone.loc['ID'].index[1]) != bool
# # integer
# assert type(t.tableone.loc['ID'].index[0]) == int
# assert type(t.tableone.loc['ID'].index[1]) == int
@with_setup(setup, teardown)
def test_the_decimals_argument_for_continuous_variables(self):
"""
For continuous variables, the decimals argument should set the number
of decimal places for all summary statistics (e.g. mean and standard
deviation).
"""
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
categorical = ['ICU', 'death']
groupby = ['death']
nonnormal = ['Age']
# no decimals argument
# expected result is to default to 1
t_no_arg = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False,
label_suffix=False)
t_no_arg_group0 = t_no_arg.tableone['Grouped by death'].loc["Weight",
"0"].values
t_no_arg_group0_expected = np.array(['83.0 (23.6)'])
t_no_arg_group1 = t_no_arg.tableone['Grouped by death'].loc["Weight",
"1"].values
t_no_arg_group1_expected = np.array(['82.3 (25.4)'])
assert all(t_no_arg_group0 == t_no_arg_group0_expected)
assert all(t_no_arg_group1 == t_no_arg_group1_expected)
# decimals = 1
t1_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False, decimals=1,
label_suffix=False)
t1_group0 = t1_decimal.tableone['Grouped by death'].loc["Weight",
"0"].values
t1_group0_expected = np.array(['83.0 (23.6)'])
t1_group1 = t1_decimal.tableone['Grouped by death'].loc["Weight",
"1"].values
t1_group1_expected = np.array(['82.3 (25.4)'])
assert all(t1_group0 == t1_group0_expected)
assert all(t1_group1 == t1_group1_expected)
# decimals = 2
t2_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False, decimals=2,
label_suffix=False)
t2_group0 = t2_decimal.tableone['Grouped by death'].loc["Weight",
"0"].values
t2_group0_expected = np.array(['83.04 (23.58)'])
t2_group1 = t2_decimal.tableone['Grouped by death'].loc["Weight",
"1"].values
t2_group1_expected = np.array(['82.29 (25.40)'])
assert all(t2_group0 == t2_group0_expected)
assert all(t2_group1 == t2_group1_expected)
# decimals = {"Age": 0, "Weight":3}
t3_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False,
decimals={"Age": 0, "Weight": 3},
label_suffix=False)
t3_group0 = t3_decimal.tableone['Grouped by death'].loc["Weight",
"0"].values
t3_group0_expected = np.array(['83.041 (23.581)'])
t3_group1 = t3_decimal.tableone['Grouped by death'].loc["Weight",
"1"].values
t3_group1_expected = np.array(['82.286 (25.396)'])
assert all(t3_group0 == t3_group0_expected)
assert all(t3_group1 == t3_group1_expected)
@with_setup(setup, teardown)
def test_the_decimals_argument_for_categorical_variables(self):
"""
For categorical variables, the decimals argument should set the number
of decimal places for the percent only.
"""
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
categorical = ['ICU', 'death']
groupby = ['death']
nonnormal = ['Age']
# decimals = 1
t1_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False, decimals=1,
label_suffix=False)
t1_group0 = t1_decimal.tableone['Grouped by death'].loc["ICU",
"0"].values
t1_group0_expected = np.array(['137 (15.9)', '194 (22.5)',
'318 (36.8)', '215 (24.9)'])
t1_group1 = t1_decimal.tableone['Grouped by death'].loc["ICU",
"1"].values
t1_group1_expected = np.array(['25 (18.4)', '8 (5.9)',
'62 (45.6)', '41 (30.1)'])
assert all(t1_group0 == t1_group0_expected)
assert all(t1_group1 == t1_group1_expected)
# decimals = 2
t2_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False, decimals=2,
label_suffix=False)
t2_group0 = t2_decimal.tableone['Grouped by death'].loc["ICU",
"0"].values
t2_group0_expected = np.array(['137 (15.86)', '194 (22.45)',
'318 (36.81)', '215 (24.88)'])
t2_group1 = t2_decimal.tableone['Grouped by death'].loc["ICU",
"1"].values
t2_group1_expected = np.array(['25 (18.38)', '8 (5.88)',
'62 (45.59)', '41 (30.15)'])
assert all(t2_group0 == t2_group0_expected)
assert all(t2_group1 == t2_group1_expected)
# decimals = {"ICU":3}
t3_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False,
decimals={"ICU": 3}, label_suffix=False)
t3_group0 = t3_decimal.tableone['Grouped by death'].loc["ICU",
"0"].values
t3_group0_expected = np.array(['137 (15.856)', '194 (22.454)',
'318 (36.806)', '215 (24.884)'])
t3_group1 = t3_decimal.tableone['Grouped by death'].loc["ICU",
"1"].values
t3_group1_expected = np.array(['25 (18.382)', '8 (5.882)',
'62 (45.588)', '41 (30.147)'])
assert all(t3_group0 == t3_group0_expected)
assert all(t3_group1 == t3_group1_expected)
# decimals = {"Age":3}
# expected result is to default to 1 decimal place
t4_decimal = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, pval=False,
decimals={"Age": 3}, label_suffix=False)
t4_group0 = t4_decimal.tableone['Grouped by death'].loc["ICU",
"0"].values
t4_group0_expected = np.array(['137 (15.9)', '194 (22.5)',
'318 (36.8)', '215 (24.9)'])
t4_group1 = t4_decimal.tableone['Grouped by death'].loc["ICU",
"1"].values
t4_group1_expected = np.array(['25 (18.4)', '8 (5.9)',
'62 (45.6)', '41 (30.1)'])
assert all(t4_group0 == t4_group0_expected)
assert all(t4_group1 == t4_group1_expected)
@with_setup(setup, teardown)
def test_nan_rows_not_deleted_in_categorical_columns(self):
"""
Test that rows in categorical columns are not deleted if there are null
values (issue #79).
"""
# create the dataset
fruit = [['apple', 'durian', 'pineapple', 'banana'],
['pineapple', 'orange', 'peach', 'lemon'],
['lemon', 'peach', 'lemon', 'banana'],
['durian', 'apple', 'orange', 'lemon'],
['banana', 'durian', 'lemon', 'apple'],
['orange', 'pineapple', 'lemon', 'banana'],
['banana', 'orange', 'apple', 'lemon']]
df = pd.DataFrame(fruit)
df.columns = ['basket1', 'basket2', 'basket3', 'basket4']
# set two of the columns to none
df.loc[1:3, 'basket2'] = None
df.loc[2:4, 'basket3'] = None
# create tableone
t1 = TableOne(df, label_suffix=False,
categorical=['basket1', 'basket2', 'basket3', 'basket4'])
assert all(t1.tableone.loc['basket1'].index == ['apple', 'banana',
'durian', 'lemon',
'orange', 'pineapple'])
assert all(t1.tableone.loc['basket2'].index == ['durian', 'orange',
'pineapple'])
assert all(t1.tableone.loc['basket3'].index == ['apple', 'lemon',
'peach', 'pineapple'])
assert all(t1.tableone.loc['basket4'].index == ['apple', 'banana',
'lemon'])
@with_setup(setup, teardown)
def test_pval_correction(self):
"""
Test the pval_adjust argument
"""
df = pd.DataFrame({'numbers': [1, 2, 6, 1, 1, 1],
'other': [1, 2, 3, 3, 3, 4],
'colors': ['red', 'white', 'blue', 'red', 'blue', 'blue'],
'even': ['yes', 'no', 'yes', 'yes', 'no', 'yes']})
t1 = TableOne(df, groupby="even", pval=True, pval_adjust="bonferroni")
# check the multiplier is correct (3 = no. of reported values)
pvals_expected = {'numbers, mean (SD)': '1.000',
'other, mean (SD)': '1.000',
'colors, n (%)': '0.669'}
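# Bonferroni sketch (an assumption about the adjustment, not read from the
# package internals): adjusted_p = min(1.0, raw_p * n_tests). With the three
# reported variables above, each raw p-value is tripled and capped at 1,
# which is why the continuous rows show '1.000'.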
group = 'Grouped by even'
col = 'P-Value (adjusted)'
for k in pvals_expected:
assert_equal(t1.tableone.loc[k][group][col].values[0],
pvals_expected[k])
# passing pval_adjust=True (instead of a method name) should also be handled
with warnings.catch_warnings(record=False) as w:
warnings.simplefilter('ignore', category=UserWarning)
t2 = TableOne(df, groupby="even", pval=True, pval_adjust=True)
for k in pvals_expected:
assert_equal(t1.tableone.loc[k][group][col].values[0],
pvals_expected[k])
@with_setup(setup, teardown)
def test_custom_statistical_tests(self):
"""
Test that the user can specify custom statistical functions.
"""
# from the example provided at:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html
# define custom test
func = mytest
np.random.seed(12345678)
n1 = 200
n2 = 300
# Baseline distribution
rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
df1 = pd.DataFrame({'rvs': 'rvs1', 'val': rvs1})
# Different to rvs1
# stats.ks_2samp(rvs1, rvs2)
# (0.20833333333333334, 5.129279597781977e-05)
rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
df2 = pd.DataFrame({'rvs': 'rvs2', 'val': rvs2})
# Similar to rvs1
# stats.ks_2samp(rvs1, rvs3)
# (0.10333333333333333, 0.14691437867433876)
rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
df3 = pd.DataFrame({'rvs': 'rvs3', 'val': rvs3})
# Identical to rvs1
# stats.ks_2samp(rvs1, rvs4)
# (0.07999999999999996, 0.41126949729859719)
rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
df4 = pd.DataFrame({'rvs': 'rvs4', 'val': rvs4})
# Table 1 for different distributions
different = df1.append(df2, ignore_index=True)
t1_diff = TableOne(data=different, columns=["val"], pval=True,
groupby="rvs", htest={"val": func})
assert_almost_equal(t1_diff._htest_table['P-Value'].val,
stats.ks_2samp(rvs1, rvs2)[1])
# Table 1 for similar distributions
similar = df1.append(df3, ignore_index=True)
t1_similar = TableOne(data=similar, columns=["val"], pval=True,
groupby="rvs", htest={"val": func})
assert_almost_equal(t1_similar._htest_table['P-Value'].val,
stats.ks_2samp(rvs1, rvs3)[1])
# Table 1 for identical distributions
identical = df1.append(df4, ignore_index=True)
t1_identical = TableOne(data=identical, columns=["val"], pval=True,
groupby="rvs", htest={"val": func})
assert_almost_equal(t1_identical._htest_table['P-Value'].val,
stats.ks_2samp(rvs1, rvs4)[1])
@with_setup(setup, teardown)
def test_compute_standardized_mean_difference_continuous(self):
"""
Test that the pairwise standardized mean difference is computed correctly
for continuous variables.
# Ref: Introduction to Meta-Analysis. Michael Borenstein,
# L. V. Hedges, J. P. T. Higgins and H. R. Rothstein
# Wiley (2011). Chapter 4. Effect Sizes Based on Means.
"""
# Example from Hedges 2011:
# "For example, suppose that a study has sample means X1=103.00,
# X2=100.00, sample standard deviations S1=5.5, S2=4.5, and
# sample sizes n1=50 and n2=50".
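# Worked sketch of the textbook formulas (Borenstein et al., Ch. 4); the
# sign convention of _cont_smd itself is an implementation detail:
#     s_pooled = sqrt(((n1-1)*sd1**2 + (n2-1)*sd2**2) / (n1+n2-2))
#              = sqrt(25.25) ~ 5.025
#     d  = (mean1 - mean2) / s_pooled ~ 3 / 5.025 ~ 0.597
#     se = sqrt((n1+n2)/(n1*n2) + d**2/(2*(n1+n2))) ~ 0.204
# Hedges' small-sample correction J = 1 - 3/(4*(n1+n2-2) - 1) ~ 0.992 gives
# the unbiased values g = J*d ~ 0.592 and se_g = J*se ~ 0.203.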
t = TableOne(pd.DataFrame([1, 2, 3]))
mean1 = 103.0
mean2 = 100.0
n1 = 50
n2 = 50
sd1 = 5.5
sd2 = 4.5
smd, se = t._cont_smd(mean1=mean1, mean2=mean2, sd1=sd1, sd2=sd2,
n1=n1, n2=n2)
assert_equal(round(smd, 4), -0.5970)
assert_equal(round(se, 4), 0.2044)
# Test unbiased estimate using Hedges correction (Hedges, 2011)
smd, se = t._cont_smd(mean1=mean1, mean2=mean2, sd1=sd1, sd2=sd2,
n1=n1, n2=n2, unbiased=True)
assert_equal(round(smd, 4), -0.5924)
assert_equal(round(se, 4), 0.2028)
# Test on input data
data1 = [1, 2, 3, 4, 5, 6, 7, 8]
data2 = [2, 2, 3, 4, 5, 6, 7, 8, 9, 10]
smd_data, se_data = t._cont_smd(data1=data1, data2=data2)
mean1 = np.mean(data1)
mean2 = np.mean(data2)
n1 = len(data1)
n2 = len(data2)
sd1 = np.std(data1)
sd2 = np.std(data2)
smd_summary, se_summary = t._cont_smd(mean1=mean1, mean2=mean2,
sd1=sd1, sd2=sd2, n1=n1, n2=n2)
assert_equal(round(smd_data, 4), round(smd_summary, 4))
assert_equal(round(se_data, 4), round(se_summary, 4))
# test with the physionet data
cols = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'MechVent', 'LOS',
'death']
categorical = ['ICU', 'MechVent', 'death']
strata = "MechVent"
t = TableOne(self.data_pn, categorical=categorical, label_suffix=False,
groupby=strata, pval=True, htest_name=False, smd=True)
# consistent with R StdDiff() and R tableone
exp_smd = {'Age': '-0.129',
'SysABP': '-0.177',
'Height': '-0.073',
'Weight': '0.124',
'LOS': '0.121'}
for k in exp_smd:
smd = t.tableone.loc[k, 'Grouped by MechVent']['SMD (0,1)'][0]
assert_equal(smd, exp_smd[k])
@with_setup(setup, teardown)
def test_compute_standardized_mean_difference_categorical(self):
"""
Test that the pairwise standardized mean difference is computed correctly
for categorical variables.
# Ref: Introduction to Meta-Analysis. Michael Borenstein,
# L. V. Hedges, J. P. T. Higgins and H. R. Rothstein
# Wiley (2011). Chapter 4. Effect Sizes Based on Means.
"""
t = TableOne(pd.DataFrame([1, 2, 3]))
# test with the physionet data
cols = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'MechVent', 'LOS',
'death']
categorical = ['ICU', 'MechVent', 'death']
strata = "MechVent"
t = TableOne(self.data_pn, categorical=categorical, label_suffix=False,
groupby=strata, pval=True, htest_name=False, smd=True)
# consistent with R StdDiff() and R tableone
exp_smd = {'ICU': '0.747',
'MechVent': 'nan',
'death': '0.017'}
for k in exp_smd:
smd = t.tableone.loc[k, 'Grouped by MechVent']['SMD (0,1)'][0]
assert_equal(smd, exp_smd[k])
@with_setup(setup, teardown)
def test_order_of_order_categorical_columns(self):
"""
Test that the order of ordered categorical columns is retained.
"""
day_cat = pd.Categorical(["mon", "wed", "tue", "thu"],
categories=["wed", "thu", "mon", "tue"], ordered=True)
alph_cat = pd.Categorical(["a", "b", "c", "a"],
categories=["b", "c", "d", "a"], ordered=False)
mon_cat = pd.Categorical(["jan", "feb", "mar", "apr"],
categories=["feb", "jan", "mar", "apr"], ordered=True)
data = pd.DataFrame({"A": ["a", "b", "c", "a"]})
data["day"] = day_cat
data["alph"] = alph_cat
data["month"] = mon_cat
order = {"month": ["jan"], "day": ["mon", "tue", "wed"]}
# if a custom order is not specified, the categorical order
# specified above should apply
t1 = TableOne(data, label_suffix=False)
t1_expected_order = {'month': ["feb", "jan", "mar", "apr"],
'day': ["wed", "thu", "mon", "tue"]}
for k in order:
assert_list_equal(t1._order[k], t1_expected_order[k])
assert_list_equal(t1.tableone.loc[k].index.to_list(),
t1_expected_order[k])
# if a desired order is set, it should override the order
t2 = TableOne(data, order=order, label_suffix=False)
t2_expected_order = {'month': ["jan", "feb", "mar", "apr"],
'day': ["mon", "tue", "wed", "thu"]}
for k in order:
assert_list_equal(t2._order[k], t2_expected_order[k])
assert_list_equal(t2.tableone.loc[k].index.to_list(),
t2_expected_order[k])
@with_setup(setup, teardown)
def test_min_max_for_nonnormal_variables(self):
"""
Test the min_max argument returns expected results.
"""
# columns to summarize
columns = ['Age', 'SysABP', 'Height', 'Weight', 'ICU', 'death']
# columns containing categorical variables
categorical = ['ICU']
# set decimal places for age to 0
decimals = {"Age": 0}
# non-normal variables
nonnormal = ['Age']
# optionally, a categorical variable for stratification
groupby = ['death']
t1 = TableOne(self.data_pn, columns=columns, categorical=categorical,
groupby=groupby, nonnormal=nonnormal, decimals=decimals,
min_max=['Age'])
k = "Age, median [min,max]"
group = "Grouped by death"
t1_columns = ["Overall", "0", "1"]
expected = ["68 [16,90]", "66 [16,90]", "75 [26,90]"]
for c, e in zip(t1_columns, expected):
cell = t1.tableone.loc[k][group][c].values[0]
assert_equal(cell, e)
@with_setup(setup, teardown)
def test_row_percent_false(self):
"""
Test row_percent=False displays n(%) for the column.
"""
# columns to summarize
columns = ['Age', 'SysABP', 'Height', 'MechVent', 'ICU', 'death']
# columns containing categorical variables
categorical = ['ICU', 'MechVent']
# set decimal places for age to 0
decimals = {"Age": 0}
# non-normal variables
nonnormal = ['Age']
# optionally, a categorical variable for stratification
groupby = ['death']
group = "Grouped by death"
# row_percent = False
t1 = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, decimals=decimals,
row_percent=False)
row1 = list(t1.tableone.loc["MechVent, n (%)"][group].values[0])
row1_expect = [0, '540 (54.0)', '468 (54.2)', '72 (52.9)']
assert_list_equal(row1, row1_expect)
row2 = list(t1.tableone.loc["MechVent, n (%)"][group].values[1])
row2_expect = ['', '460 (46.0)', '396 (45.8)', '64 (47.1)']
assert_list_equal(row2, row2_expect)
row3 = list(t1.tableone.loc["ICU, n (%)"][group].values[0])
row3_expect = [0, '162 (16.2)', '137 (15.9)', '25 (18.4)']
assert_list_equal(row3, row3_expect)
row4 = list(t1.tableone.loc["ICU, n (%)"][group].values[1])
row4_expect = ['', '202 (20.2)', '194 (22.5)', '8 (5.9)']
assert_list_equal(row4, row4_expect)
row5 = list(t1.tableone.loc["ICU, n (%)"][group].values[2])
row5_expect = ['', '380 (38.0)', '318 (36.8)', '62 (45.6)']
assert_list_equal(row5, row5_expect)
row6 = list(t1.tableone.loc["ICU, n (%)"][group].values[3])
row6_expect = ['', '256 (25.6)', '215 (24.9)', '41 (30.1)']
assert_list_equal(row6, row6_expect)
@with_setup(setup, teardown)
def test_row_percent_true(self):
"""
Test row_percent=True displays n(%) for the row rather than the column.
"""
# columns to summarize
columns = ['Age', 'SysABP', 'Height', 'MechVent', 'ICU', 'death']
# columns containing categorical variables
categorical = ['ICU', 'MechVent']
# set decimal places for age to 0
decimals = {"Age": 0}
# non-normal variables
nonnormal = ['Age']
# optionally, a categorical variable for stratification
groupby = ['death']
group = "Grouped by death"
# row_percent = True
t2 = TableOne(self.data_pn, columns=columns,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, decimals=decimals,
row_percent=True)
row1 = list(t2.tableone.loc["MechVent, n (%)"][group].values[0])
row1_expect = [0, '540 (100.0)', '468 (86.7)', '72 (13.3)']
assert_list_equal(row1, row1_expect)
row2 = list(t2.tableone.loc["MechVent, n (%)"][group].values[1])
row2_expect = ['', '460 (100.0)', '396 (86.1)', '64 (13.9)']
assert_list_equal(row2, row2_expect)
row3 = list(t2.tableone.loc["ICU, n (%)"][group].values[0])
row3_expect = [0, '162 (100.0)', '137 (84.6)', '25 (15.4)']
assert_list_equal(row3, row3_expect)
row4 = list(t2.tableone.loc["ICU, n (%)"][group].values[1])
row4_expect = ['', '202 (100.0)', '194 (96.0)', '8 (4.0)']
assert_list_equal(row4, row4_expect)
row5 = list(t2.tableone.loc["ICU, n (%)"][group].values[2])
row5_expect = ['', '380 (100.0)', '318 (83.7)', '62 (16.3)']
assert_list_equal(row5, row5_expect)
row6 = list(t2.tableone.loc["ICU, n (%)"][group].values[3])
row6_expect = ['', '256 (100.0)', '215 (84.0)', '41 (16.0)']
assert_list_equal(row6, row6_expect)
@with_setup(setup, teardown)
def test_row_percent_true_and_overall_false(self):
"""
Test that row_percent=True displays row percentages when the Overall
column is disabled (overall=False).
"""
# columns to summarize
columns = ['Age', 'SysABP', 'Height', 'MechVent', 'ICU', 'death']
# columns containing categorical variables
categorical = ['ICU', 'MechVent']
# set decimal places for age to 0
decimals = {"Age": 0}
# non-normal variables
nonnormal = ['Age']
# optionally, a categorical variable for stratification
groupby = ['death']
group = "Grouped by death"
# row_percent = True
t1 = TableOne(self.data_pn, columns=columns, overall=False,
categorical=categorical, groupby=groupby,
nonnormal=nonnormal, decimals=decimals,
row_percent=True)
row1 = list(t1.tableone.loc["MechVent, n (%)"][group].values[0])
row1_expect = [0, '468 (86.7)', '72 (13.3)']
assert_list_equal(row1, row1_expect)
row2 = list(t1.tableone.loc["MechVent, n (%)"][group].values[1])
row2_expect = ['', '396 (86.1)', '64 (13.9)']
assert_list_equal(row2, row2_expect)
row3 = list(t1.tableone.loc["ICU, n (%)"][group].values[0])
row3_expect = [0, '137 (84.6)', '25 (15.4)']
assert_list_equal(row3, row3_expect)
row4 = list(t1.tableone.loc["ICU, n (%)"][group].values[1])
row4_expect = ['', '194 (96.0)', '8 (4.0)']
assert_list_equal(row4, row4_expect)
row5 = list(t1.tableone.loc["ICU, n (%)"][group].values[2])
row5_expect = ['', '318 (83.7)', '62 (16.3)']
assert_list_equal(row5, row5_expect)
row6 = list(t1.tableone.loc["ICU, n (%)"][group].values[3])
row6_expect = ['', '215 (84.0)', '41 (16.0)']
assert_list_equal(row6, row6_expect)
| mit |
trungnt13/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
kmather73/zipline | tests/test_history.py | 17 | 38530 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from itertools import product
from textwrap import dedent
import warnings
from nose_parameterized import parameterized
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from zipline.history import history
from zipline.history.history_container import HistoryContainer
from zipline.protocol import BarData
import zipline.utils.factory as factory
from zipline import TradingAlgorithm
from zipline.finance.trading import (
SimulationParameters,
TradingEnvironment,
with_environment,
)
from zipline.errors import IncompatibleHistoryFrequency
from zipline.sources import RandomWalkSource, DataFrameSource
from .history_cases import (
HISTORY_CONTAINER_TEST_CASES,
)
# Cases are over the July 4th holiday, to ensure use of trading calendar.
# March 2013
# Su Mo Tu We Th Fr Sa
# 1 2
# 3 4 5 6 7 8 9
# 10 11 12 13 14 15 16
# 17 18 19 20 21 22 23
# 24 25 26 27 28 29 30
# 31
# April 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30
#
# May 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
#
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
#
# Times to be converted via:
# pd.Timestamp('2013-07-05 9:31', tz='US/Eastern').tz_convert('UTC')},
INDEX_TEST_CASES_RAW = {
'week of daily data': {
'input': {'bar_count': 5,
'frequency': '1d',
'algo_dt': '2013-07-05 9:31AM'},
'expected': [
'2013-06-28 4:00PM',
'2013-07-01 4:00PM',
'2013-07-02 4:00PM',
'2013-07-03 1:00PM',
'2013-07-05 9:31AM',
]
},
'five minutes on july 5th open': {
'input': {'bar_count': 5,
'frequency': '1m',
'algo_dt': '2013-07-05 9:31AM'},
'expected': [
'2013-07-03 12:57PM',
'2013-07-03 12:58PM',
'2013-07-03 12:59PM',
'2013-07-03 1:00PM',
'2013-07-05 9:31AM',
]
},
}
def to_timestamp(dt_str):
return pd.Timestamp(dt_str, tz='US/Eastern').tz_convert('UTC')
def convert_cases(cases):
"""
Convert raw strings to values comparable with system data.
"""
cases = cases.copy()
for case in cases.values():
case['input']['algo_dt'] = to_timestamp(case['input']['algo_dt'])
case['expected'] = pd.DatetimeIndex([to_timestamp(dt_str) for dt_str
in case['expected']])
return cases
INDEX_TEST_CASES = convert_cases(INDEX_TEST_CASES_RAW)
def get_index_at_dt(case_input):
history_spec = history.HistorySpec(
case_input['bar_count'],
case_input['frequency'],
None,
False,
data_frequency='minute',
)
return history.index_at_dt(history_spec, case_input['algo_dt'])
class TestHistoryIndex(TestCase):
@classmethod
def setUpClass(cls):
cls.environment = TradingEnvironment.instance()
@parameterized.expand(
[(name, case['input'], case['expected'])
for name, case in INDEX_TEST_CASES.items()]
)
def test_index_at_dt(self, name, case_input, expected):
history_index = get_index_at_dt(case_input)
history_series = pd.Series(index=history_index)
expected_series = pd.Series(index=expected)
pd.util.testing.assert_series_equal(history_series, expected_series)
class TestHistoryContainer(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment.instance()
def bar_data_dt(self, bar_data, require_unique=True):
"""
Get a dt to associate with the given BarData object.
If require_unique == True, throw an error if multiple unique dt's are
encountered. Otherwise, return the earliest dt encountered.
"""
dts = {sid_data['dt'] for sid_data in bar_data.values()}
if require_unique and len(dts) > 1:
self.fail("Multiple unique dts ({0}) in {1}".format(dts, bar_data))
return sorted(dts)[0]
@parameterized.expand(
[(name,
case['specs'],
case['sids'],
case['dt'],
case['updates'],
case['expected'])
for name, case in HISTORY_CONTAINER_TEST_CASES.items()]
)
def test_history_container(self,
name,
specs,
sids,
dt,
updates,
expected):
for spec in specs:
# Sanity check on test input.
self.assertEqual(len(expected[spec.key_str]), len(updates))
container = HistoryContainer(
{spec.key_str: spec for spec in specs}, sids, dt, 'minute',
)
for update_count, update in enumerate(updates):
bar_dt = self.bar_data_dt(update)
container.update(update, bar_dt)
for spec in specs:
pd.util.testing.assert_frame_equal(
container.get_history(spec, bar_dt),
expected[spec.key_str][update_count],
check_dtype=False,
check_column_type=True,
check_index_type=True,
check_frame_type=True,
)
def test_multiple_specs_on_same_bar(self):
"""
Test that a ffill and non ffill spec both get
the correct results when called on the same tick
"""
spec = history.HistorySpec(
bar_count=3,
frequency='1m',
field='price',
ffill=True,
data_frequency='minute'
)
no_fill_spec = history.HistorySpec(
bar_count=3,
frequency='1m',
field='price',
ffill=False,
data_frequency='minute'
)
specs = {spec.key_str: spec, no_fill_spec.key_str: no_fill_spec}
initial_sids = [1, ]
initial_dt = pd.Timestamp(
'2013-06-28 9:31AM', tz='US/Eastern').tz_convert('UTC')
container = HistoryContainer(
specs, initial_sids, initial_dt, 'minute'
)
bar_data = BarData()
container.update(bar_data, initial_dt)
# Add data on bar two of first day.
second_bar_dt = pd.Timestamp(
'2013-06-28 9:32AM', tz='US/Eastern').tz_convert('UTC')
bar_data[1] = {
'price': 10,
'dt': second_bar_dt
}
container.update(bar_data, second_bar_dt)
third_bar_dt = pd.Timestamp(
'2013-06-28 9:33AM', tz='US/Eastern').tz_convert('UTC')
del bar_data[1]
# add nan for 3rd bar
container.update(bar_data, third_bar_dt)
prices = container.get_history(spec, third_bar_dt)
no_fill_prices = container.get_history(no_fill_spec, third_bar_dt)
self.assertEqual(prices.values[-1], 10)
self.assertTrue(np.isnan(no_fill_prices.values[-1]),
"Last price should be np.nan")
def test_container_nans_and_daily_roll(self):
spec = history.HistorySpec(
bar_count=3,
frequency='1d',
field='price',
ffill=True,
data_frequency='minute'
)
specs = {spec.key_str: spec}
initial_sids = [1, ]
initial_dt = pd.Timestamp(
'2013-06-28 9:31AM', tz='US/Eastern').tz_convert('UTC')
container = HistoryContainer(
specs, initial_sids, initial_dt, 'minute'
)
bar_data = BarData()
container.update(bar_data, initial_dt)
# There is no database to backfill from and no data for the first bar,
# so all values should be NaN.
prices = container.get_history(spec, initial_dt)
nan_values = np.isnan(prices[1])
self.assertTrue(all(nan_values), nan_values)
# Add data on bar two of first day.
second_bar_dt = pd.Timestamp(
'2013-06-28 9:32AM', tz='US/Eastern').tz_convert('UTC')
bar_data[1] = {
'price': 10,
'dt': second_bar_dt
}
container.update(bar_data, second_bar_dt)
prices = container.get_history(spec, second_bar_dt)
# Prices should be
# 1
# 2013-06-26 20:00:00+00:00 NaN
# 2013-06-27 20:00:00+00:00 NaN
# 2013-06-28 13:32:00+00:00 10
self.assertTrue(np.isnan(prices[1].ix[0]))
self.assertTrue(np.isnan(prices[1].ix[1]))
self.assertEqual(prices[1].ix[2], 10)
third_bar_dt = pd.Timestamp(
'2013-06-28 9:33AM', tz='US/Eastern').tz_convert('UTC')
del bar_data[1]
container.update(bar_data, third_bar_dt)
prices = container.get_history(spec, third_bar_dt)
# The missing bar for sid 1 should be forward-filled
# Prices should be
# 1
# 2013-06-26 20:00:00+00:00 NaN
# 2013-06-27 20:00:00+00:00 NaN
# 2013-06-28 13:33:00+00:00 10
self.assertEquals(prices[1][third_bar_dt], 10)
# Note that we did not fill in data at the close.
# There was a bug where a NaN was being introduced because the last
# value of the 'raw' data was used instead of a ffilled close price.
day_two_first_bar_dt = pd.Timestamp(
'2013-07-01 9:31AM', tz='US/Eastern').tz_convert('UTC')
bar_data[1] = {
'price': 20,
'dt': day_two_first_bar_dt
}
container.update(bar_data, day_two_first_bar_dt)
prices = container.get_history(spec, day_two_first_bar_dt)
# Prices Should Be
# 1
# 2013-06-27 20:00:00+00:00 nan
# 2013-06-28 20:00:00+00:00 10
# 2013-07-01 13:31:00+00:00 20
self.assertTrue(np.isnan(prices[1].ix[0]))
self.assertEqual(prices[1].ix[1], 10)
self.assertEqual(prices[1].ix[2], 20)
# Clear out the bar data
del bar_data[1]
day_three_first_bar_dt = pd.Timestamp(
'2013-07-02 9:31AM', tz='US/Eastern').tz_convert('UTC')
container.update(bar_data, day_three_first_bar_dt)
prices = container.get_history(spec, day_three_first_bar_dt)
# 1
# 2013-06-28 20:00:00+00:00 10
# 2013-07-01 20:00:00+00:00 20
# 2013-07-02 13:31:00+00:00 20
self.assertEqual(prices[1].ix[0], 10)
self.assertEqual(prices[1].ix[1], 20)
self.assertEqual(prices[1].ix[2], 20)
day_four_first_bar_dt = pd.Timestamp(
'2013-07-03 9:31AM', tz='US/Eastern').tz_convert('UTC')
container.update(bar_data, day_four_first_bar_dt)
prices = container.get_history(spec, day_four_first_bar_dt)
# 1
# 2013-07-01 20:00:00+00:00 20
# 2013-07-02 20:00:00+00:00 20
# 2013-07-03 13:31:00+00:00 20
self.assertEqual(prices[1].ix[0], 20)
self.assertEqual(prices[1].ix[1], 20)
self.assertEqual(prices[1].ix[2], 20)
class TestHistoryAlgo(TestCase):
def setUp(self):
np.random.seed(123)
def test_history_daily(self):
bar_count = 3
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count={bar_count}, frequency='1d', field='price')
context.history_trace = []
def handle_data(context, data):
prices = history(bar_count={bar_count}, frequency='1d', field='price')
context.history_trace.append(prices)
""".format(bar_count=bar_count).strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-30', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end, data_frequency='daily')
_, df = factory.create_test_df_source(sim_params)
df = df.astype(np.float64)
source = DataFrameSource(df)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='daily',
sim_params=sim_params
)
output = test_algo.run(source)
self.assertIsNotNone(output)
history_trace = test_algo.history_trace
for i, received in enumerate(history_trace[bar_count - 1:]):
expected = df.iloc[i:i + bar_count]
assert_frame_equal(expected, received)
def test_history_daily_data_1m_window(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=1, frequency='1m', field='price')
def handle_data(context, data):
prices = history(bar_count=3, frequency='1d', field='price')
""".strip()
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-30', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
with self.assertRaises(IncompatibleHistoryFrequency):
algo = TradingAlgorithm(
script=algo_text,
data_frequency='daily',
sim_params=sim_params
)
source = RandomWalkSource(start=start, end=end)
algo.run(source)
def test_basic_history(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=2, frequency='1d', field='price')
def handle_data(context, data):
prices = history(bar_count=2, frequency='1d', field='price')
prices['prices_times_two'] = prices[1] * 2
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params,
identifiers=[0]
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
oldest_dt = pd.Timestamp(
'2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
# Random, depends on seed
self.assertEquals(139.36946942498648, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_basic_history_one_day(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=1, frequency='1d', field='price')
def handle_data(context, data):
prices = history(bar_count=1, frequency='1d', field='price')
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
# oldest and newest should be the same if there is only 1 bar
oldest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
# Random, depends on seed
self.assertEquals(180.15661995395106, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_basic_history_positional_args(self):
"""
Ensure that positional args work.
"""
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
oldest_dt = pd.Timestamp(
'2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
self.assertEquals(139.36946942498648, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_history_with_volume(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'volume')
def handle_data(context, data):
volume = history(3, '1d', 'volume')
record(current_volume=volume[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_volume'],
212218404.0)
def test_history_with_high(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'high')
def handle_data(context, data):
highs = history(3, '1d', 'high')
record(current_high=highs[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_high'],
139.5370641791925)
def test_history_with_low(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'low')
def handle_data(context, data):
lows = history(3, '1d', 'low')
record(current_low=lows[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_low'],
99.891436939669944)
def test_history_with_open(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'open_price')
def handle_data(context, data):
opens = history(3, '1d', 'open_price')
record(current_open=opens[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_open'],
99.991436939669939)
def test_history_passed_to_func(self):
"""
Had an issue where MagicMock was causing errors during validation
with rolling mean.
"""
algo_text = """
from zipline.api import history, add_history
import pandas as pd
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
pd.rolling_mean(prices, 2)
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
# At this point, just ensure that there is no crash.
self.assertIsNotNone(output)
def test_history_passed_to_talib(self):
"""
Had an issue where MagicMock was causing errors during validation
with talib.
We don't officially support a talib integration, yet.
But using talib directly should work.
"""
algo_text = """
import talib
import numpy as np
from zipline.api import history, add_history, record
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
record(ma=ma_result[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
# Eddie: this was set to 04-10 but I don't see how that makes
# sense as it does not generate enough data to get at -2 index
# below.
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
# At this point, just ensure that there is no crash.
self.assertIsNotNone(output)
recorded_ma = output.ix[-2, 'ma']
self.assertFalse(pd.isnull(recorded_ma))
# Depends on seed
np.testing.assert_almost_equal(recorded_ma,
159.76304468946876)
@parameterized.expand([
('daily',),
('minute',),
])
def test_history_container_constructed_at_runtime(self, data_freq):
algo_text = dedent(
"""\
from zipline.api import history
def handle_data(context, data):
context.prices = history(2, '1d', 'price')
"""
)
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency=data_freq,
emission_rate=data_freq
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency=data_freq,
sim_params=sim_params
)
source = RandomWalkSource(start=start, end=end, freq=data_freq)
self.assertIsNone(test_algo.history_container)
test_algo.run(source)
self.assertIsNotNone(
test_algo.history_container,
msg='HistoryContainer was not constructed at runtime',
)
container = test_algo.history_container
self.assertEqual(
len(container.digest_panels),
1,
msg='The HistoryContainer created too many digest panels',
)
freq, digest = list(container.digest_panels.items())[0]
self.assertEqual(
freq.unit_str,
'd',
)
self.assertEqual(
digest.window_length,
1,
msg='The digest panel is not large enough to service the given'
' HistorySpec',
)
@parameterized.expand([
(1,),
(2,),
])
def test_history_grow_length_inter_bar(self, incr):
"""
Tests growing the length of a digest panel with different date_buf
deltas once per bar.
"""
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
context.bar_count = 1
def handle_data(context, data):
prices = history(context.bar_count, '1d', 'price')
context.test_case.assertEqual(len(prices), context.bar_count)
context.bar_count += {incr}
"""
).format(incr=incr)
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
test_algo.test_case = self
source = RandomWalkSource(start=start, end=end)
self.assertIsNone(test_algo.history_container)
test_algo.run(source)
@parameterized.expand([
(1,),
(2,),
])
def test_history_grow_length_intra_bar(self, incr):
"""
Tests growing the length of a digest panel with different date_buf
deltas in a single bar.
"""
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
context.bar_count = 1
def handle_data(context, data):
prices = history(context.bar_count, '1d', 'price')
context.test_case.assertEqual(len(prices), context.bar_count)
context.bar_count += {incr}
prices = history(context.bar_count, '1d', 'price')
context.test_case.assertEqual(len(prices), context.bar_count)
"""
).format(incr=incr)
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
test_algo.test_case = self
source = RandomWalkSource(start=start, end=end)
self.assertIsNone(test_algo.history_container)
test_algo.run(source)
class TestHistoryContainerResize(TestCase):
@parameterized.expand(
(freq, field, data_frequency, construct_digest)
for freq in ('1m', '1d')
for field in HistoryContainer.VALID_FIELDS
for data_frequency in ('minute', 'daily')
for construct_digest in (True, False)
if not (freq == '1m' and data_frequency == 'daily')
)
def test_history_grow_length(self,
freq,
field,
data_frequency,
construct_digest):
bar_count = 2 if construct_digest else 1
spec = history.HistorySpec(
bar_count=bar_count,
frequency=freq,
field=field,
ffill=True,
data_frequency=data_frequency,
)
specs = {spec.key_str: spec}
initial_sids = [1]
initial_dt = pd.Timestamp(
'2013-06-28 13:31'
if data_frequency == 'minute'
else '2013-06-28 12:00AM',
tz='UTC',
)
container = HistoryContainer(
specs, initial_sids, initial_dt, data_frequency,
)
if construct_digest:
self.assertEqual(
container.digest_panels[spec.frequency].window_length, 1,
)
bar_data = BarData()
container.update(bar_data, initial_dt)
to_add = (
history.HistorySpec(
bar_count=bar_count + 1,
frequency=freq,
field=field,
ffill=True,
data_frequency=data_frequency,
),
history.HistorySpec(
bar_count=bar_count + 2,
frequency=freq,
field=field,
ffill=True,
data_frequency=data_frequency,
),
)
for spec in to_add:
container.ensure_spec(spec, initial_dt, bar_data)
self.assertEqual(
container.digest_panels[spec.frequency].window_length,
spec.bar_count - 1,
)
self.assert_history(container, spec, initial_dt)
@parameterized.expand(
(bar_count, freq, pair, data_frequency)
for bar_count in (1, 2)
for freq in ('1m', '1d')
for pair in product(HistoryContainer.VALID_FIELDS, repeat=2)
for data_frequency in ('minute', 'daily')
if not (freq == '1m' and data_frequency == 'daily')
)
def test_history_add_field(self, bar_count, freq, pair, data_frequency):
first, second = pair
spec = history.HistorySpec(
bar_count=bar_count,
frequency=freq,
field=first,
ffill=True,
data_frequency=data_frequency,
)
specs = {spec.key_str: spec}
initial_sids = [1]
initial_dt = pd.Timestamp(
'2013-06-28 13:31'
if data_frequency == 'minute'
else '2013-06-28 12:00AM',
tz='UTC',
)
container = HistoryContainer(
specs, initial_sids, initial_dt, data_frequency,
)
if bar_count > 1:
self.assertEqual(
container.digest_panels[spec.frequency].window_length, 1,
)
bar_data = BarData()
container.update(bar_data, initial_dt)
new_spec = history.HistorySpec(
bar_count,
frequency=freq,
field=second,
ffill=True,
data_frequency=data_frequency,
)
container.ensure_spec(new_spec, initial_dt, bar_data)
if bar_count > 1:
digest_panel = container.digest_panels[new_spec.frequency]
self.assertEqual(digest_panel.window_length, bar_count - 1)
self.assertIn(second, digest_panel.items)
else:
self.assertNotIn(new_spec.frequency, container.digest_panels)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.assert_history(container, new_spec, initial_dt)
@parameterized.expand(
(bar_count, pair, field, data_frequency)
for bar_count in (1, 2)
for pair in product(('1m', '1d'), repeat=2)
for field in HistoryContainer.VALID_FIELDS
for data_frequency in ('minute', 'daily')
if not ('1m' in pair and data_frequency == 'daily')
)
def test_history_add_freq(self, bar_count, pair, field, data_frequency):
first, second = pair
spec = history.HistorySpec(
bar_count=bar_count,
frequency=first,
field=field,
ffill=True,
data_frequency=data_frequency,
)
specs = {spec.key_str: spec}
initial_sids = [1]
initial_dt = pd.Timestamp(
'2013-06-28 13:31'
if data_frequency == 'minute'
else '2013-06-28 12:00AM',
tz='UTC',
)
container = HistoryContainer(
specs, initial_sids, initial_dt, data_frequency,
)
if bar_count > 1:
self.assertEqual(
container.digest_panels[spec.frequency].window_length, 1,
)
bar_data = BarData()
container.update(bar_data, initial_dt)
new_spec = history.HistorySpec(
bar_count,
frequency=second,
field=field,
ffill=True,
data_frequency=data_frequency,
)
container.ensure_spec(new_spec, initial_dt, bar_data)
if bar_count > 1:
digest_panel = container.digest_panels[new_spec.frequency]
self.assertEqual(digest_panel.window_length, bar_count - 1)
else:
self.assertNotIn(new_spec.frequency, container.digest_panels)
self.assert_history(container, new_spec, initial_dt)
@with_environment()
def assert_history(self, container, spec, dt, env=None):
hst = container.get_history(spec, dt)
self.assertEqual(len(hst), spec.bar_count)
back = spec.frequency.prev_bar
for n in reversed(hst.index):
self.assertEqual(dt, n)
dt = back(dt)
| apache-2.0 |
Quadrocube/rep | rep/estimators/neurolab.py | 1 | 11902 | # Copyright 2014-2015 Yandex LLC and contributors <https://yandex.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from abc import ABCMeta
from .interface import Classifier, Regressor
from .utils import check_inputs
from copy import deepcopy, copy
import neurolab as nl
import numpy as np
import scipy
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.base import clone
__author__ = 'Vlad Sterzhanov'
NET_TYPES = {'feed-forward': nl.net.newff,
'single-layer': nl.net.newp,
'competing-layer': nl.net.newc,
'learning-vector': nl.net.newlvq,
'elman-recurrent': nl.net.newelm,
'hemming-recurrent': nl.net.newhem,
'hopfield-recurrent': nl.net.newhop}
NET_PARAMS = ('minmax', 'cn', 'layers', 'transf', 'target',
'max_init', 'max_iter', 'delta', 'cn0', 'pc')
BASIC_PARAMS = ('net_type', 'trainf', 'initf', 'scaler')
CANT_CLASSIFY = ('learning-vector', 'hopfield-recurrent', 'competing-layer', 'hemming-recurrent')
class NeurolabBase(object):
__metaclass__ = ABCMeta
def __init__(self, net_type, initf, trainf, **kwargs):
self.train_params = {}
self.net_params = {}
self.trainf = trainf
self.initf = initf
self.net_type = net_type
self.net = None
self.scaler = None
self.set_params(**kwargs)
def set_params(self, **params):
"""
Set the parameters of this estimator
:param dict params: parameters to set in model
"""
if 'scaler' in params:
scaler = params['scaler']
self.scaler = (StandardScaler() if scaler is None else scaler)
params.pop('scaler')
for name, value in params.items():
if name in {'random_state'}:
continue
            if name.startswith("scaler__"):
                assert hasattr(self.scaler, 'set_params'), \
                    "Trying to set {} without scaler".format(name)
                self.scaler.set_params(**{name[len("scaler__"):]: value})
                continue
if name in NET_PARAMS:
self.net_params[name] = value
elif name in BASIC_PARAMS:
setattr(self, name, value)
else:
self.train_params[name] = value
def get_params(self, deep=True):
"""
Get parameters of this estimator
:return dict
"""
parameters = deepcopy(self.net_params)
parameters.update(deepcopy(self.train_params))
for name in BASIC_PARAMS:
parameters[name] = getattr(self, name)
return parameters
def _fit(self, X, y, y_train):
x_train = self._transform_input(self._get_train_features(X), y)
# Prepare parameters depending on network purpose (classification \ regression)
net_params = self._prepare_params(self.net_params, x_train, y_train)
init = self._get_initializers(self.net_type)
net = init(**net_params)
        # Allow a single initf function to be applied to all layers
initf_iterable = self.initf if hasattr(self.initf, '__iter__') else [self.initf]*len(net.layers)
for l, f in zip(net.layers, initf_iterable):
l.initf = f
net.init()
if self.trainf is not None:
net.trainf = self.trainf
net.train(x_train, y_train, **self.train_params)
self.net = net
return self
def _sim(self, X):
assert self.net is not None, 'Classifier not fitted, predict denied'
transformed_x = self._transform_input(X, fit=False)
return self.net.sim(transformed_x)
def _transform_input(self, X, y=None, fit=True):
if self.scaler is False:
return X
# FIXME: Need this while using sklearn < 0.16
X = np.copy(X)
if fit:
self.scaler = clone(self.scaler)
self.scaler.fit(X, y)
# HACK: neurolab requires all features (even those of predicted objects) to be in [min, max]
# so this dark magic appeared, seems to work ok for most reasonable usecases
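        # (a standardized feature at +/-3 sigma maps to expit(+/-1) ~= 0.73/0.27,
        # comfortably inside the (0, 1) range that neurolab expects)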
return scipy.special.expit(self.scaler.transform(X) / 3)
def _prepare_params(self, net_params, x_train, y_train):
params = deepcopy(net_params)
# Network expects features to be [0, 1]-scaled
params['minmax'] = [[0, 1]]*(x_train.shape[1])
# To unify the layer-description argument with other supported networks
if 'layers' in params:
params['size'] = params['layers']
params.pop('layers')
# For some reason Neurolab asks for a separate cn parameter instead of accessing size[-1]
# (e.g. In case of Single-Layer Perceptron)
if 'cn' in params:
params['cn'] = y_train.shape[1]
# Set output layer size
if 'size' in params:
params['size'] += [y_train.shape[1]]
return params
@staticmethod
def _get_initializers(net_type):
if net_type not in NET_TYPES:
raise AttributeError('Got unexpected network type: \'{}\''.format(net_type))
return NET_TYPES.get(net_type)
class NeurolabRegressor(NeurolabBase, Regressor):
"""
    NeurolabRegressor is a wrapper around Neurolab network-like regressors
Parameters:
-----------
:param string net_type: type of network
One of {'feed-forward', 'single-layer', 'competing-layer', 'learning-vector',
'elman-recurrent', 'hopfield-recurrent', 'hemming-recurrent'}
:param features: features used in training
:type features: list[str] or None
:param initf: layer initializers
:type initf: anything implementing call(layers). e.g. nl.init.* or list[nl.init.*] of shape [n_layers]
:param trainf: net train function
:param scaler: transformer to apply to the input objects
:type scaler: sklearn-like scaler or False (do not scale features -- use with care and keep track of minmax param)
:param list layers: list of numbers denoting size of each hidden layer
:param dict kwargs: additional arguments to net __init__, varies with different net_types
See: https://pythonhosted.org/neurolab/lib.html
"""
def __init__(self, net_type='feed-forward',
features=None,
initf=nl.init.init_zeros,
trainf=None,
scaler=None,
**kwargs):
Regressor.__init__(self, features=features)
NeurolabBase.__init__(self, net_type=net_type, initf=initf, trainf=trainf, scaler=scaler, **kwargs)
def fit(self, X, y):
"""
Fit model on data
:param X: pandas.DataFrame
:param y: iterable denoting corresponding value in object
:return: self
"""
# TODO Some networks do not support regression?
X, y, _ = check_inputs(X, y, None)
y_train = y.reshape(len(y), 1 if len(y.shape) == 1 else y.shape[1])
return self._fit(X, y, y_train)
def predict(self, X):
"""
Predict model
:param pandas.DataFrame X: data shape [n_samples, n_features]
:return: numpy.array of shape n_samples with values
"""
modeled = self._sim(self._get_train_features(X))
return modeled if modeled.shape[1] != 1 else np.ravel(modeled)
def staged_predict(self, X, step=10):
"""
Predicts probabilities on each stage
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param int step: step for returned iterations
:return: iterator
.. warning:: Doesn't have support in Neurolab (**AttributeError** will be thrown)
"""
raise AttributeError("Not supported by Neurolab networks")
class NeurolabClassifier(NeurolabBase, Classifier):
"""
    NeurolabClassifier is a wrapper around Neurolab network-like classifiers
Parameters:
-----------
:param string net_type: type of network
One of {'feed-forward', 'single-layer', 'competing-layer', 'learning-vector',
'elman-recurrent', 'hopfield-recurrent', 'hemming-recurrent'}
:param features: features used in training
:type features: list[str] or None
:param initf: layer initializers
:type initf: anything implementing call(layers). e.g. nl.init.* or list[nl.init.*] of shape [n_layers]
:param trainf: net train function
:param scaler: transformer to apply to the input objects
:type scaler: sklearn-like scaler or False (do not scale features -- use with care and keep track of minmax param)
:param list layers: list of numbers denoting size of each hidden layer
:param dict kwargs: additional arguments to net __init__, varies with different net_types
See: https://pythonhosted.org/neurolab/lib.html
"""
def __init__(self, net_type='feed-forward',
features=None,
initf=nl.init.init_zeros,
trainf=None,
scaler=None,
**kwargs):
Classifier.__init__(self, features=features)
NeurolabBase.__init__(self, net_type=net_type, initf=initf, trainf=trainf, scaler=scaler, **kwargs)
self.classes_ = None
def fit(self, X, y):
"""
Fit model on data
:param X: pandas.DataFrame
:param y: iterable denoting corresponding object classes
:return: self
"""
# Some networks do not support classification
assert self.net_type not in CANT_CLASSIFY, 'Network type does not support classification'
X, y, _ = check_inputs(X, y, None)
self._set_classes(y)
y_train = NeurolabClassifier._one_hot_transform(y)
return self._fit(X, y, y_train)
def predict_proba(self, X):
"""
Predict probabilities for each class label on dataset
:param X: pandas.DataFrame of shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples, n_classes] with probabilities
"""
return self._sim(self._get_train_features(X))
def staged_predict_proba(self, X):
"""
Predicts probabilities on each stage
:param pandas.DataFrame X: data shape [n_samples, n_features]
:return: iterator
.. warning:: Doesn't have support in Neurolab (**AttributeError** will be thrown)
"""
raise AttributeError("Not supported by Neurolab networks")
@staticmethod
def _one_hot_transform(y):
return np.array(OneHotEncoder(n_values=len(np.unique(y))).fit_transform(y.reshape((len(y), 1))).todense())
def _prepare_params(self, params, x_train, y_train):
net_params = super(NeurolabClassifier, self)._prepare_params(params, x_train, y_train)
# Default parameters for transfer functions in classifier networks
if 'transf' not in net_params:
net_params['transf'] = \
[nl.trans.TanSig()] * len(net_params['size']) if 'size' in net_params else nl.trans.SoftMax()
# Classification networks should have SoftMax as the transfer function on output layer
if hasattr(net_params['transf'], '__iter__'):
net_params['transf'][-1] = nl.trans.SoftMax()
else:
net_params['transf'] = nl.trans.SoftMax()
return net_params
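# --- Usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged illustration of how NeurolabClassifier might be driven.
# The feature names, layer size and the 'epochs' training parameter below are
# assumptions made for the sketch, not values taken from this repository.
#
#     import pandas as pd
#     from rep.estimators.neurolab import NeurolabClassifier
#
#     X = pd.DataFrame({'f0': [0., 1., 0., 1.], 'f1': [0., 0., 1., 1.]})
#     y = [0, 1, 1, 0]
#     clf = NeurolabClassifier(net_type='feed-forward', layers=[4], epochs=50)
#     clf.fit(X, y)
#     proba = clf.predict_proba(X)   # shape (4, 2); one column per class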
| apache-2.0 |
krenzlin/beaqlejs | tools/Python/eval_mushra.py | 2 | 3651 | import json
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
ResultsExt = ".txt"
ResultsFolder = "results/"
# Check if folder with results exists and retrieve list of all result files
if os.path.exists(ResultsFolder):
dirListing = os.listdir(ResultsFolder)
dirListing = [name for name in dirListing if name.endswith(ResultsExt)]
if len(dirListing)<1:
print("ERROR: empty ResultsFolder!")
raise SystemExit()
else:
print("ERROR: invalid ResultsFolder!")
raise SystemExit()
# Import and decode all result files
ResJSONList = list()
for ResFileName in dirListing:
ResFile = open(os.path.join(ResultsFolder, ResFileName))
try:
ResJSONList.append(json.load(ResFile))
except:
print(os.path.join(ResultsFolder, ResFileName)+" is not a valid JSON file.")
finally:
ResFile.close()
# Group results by test sets
numResults = len(ResJSONList)
numTests = sum(1 for dict in ResJSONList[0] if 'TestID' in dict)
RatingsDict = dict()
RuntimesDict = dict()
for ResJSONData in ResJSONList:
for i in range(0, numTests):
if 'TestID' in ResJSONData[i]:
testID = ResJSONData[i]['TestID']
if not testID in RatingsDict:
RatingsDict[testID] = dict()
if not testID in RuntimesDict:
RuntimesDict[testID] = list()
# if runtime entry exists
if 'Runtime' in ResJSONData[i]:
RuntimesDict[testID].append(ResJSONData[i]['Runtime'])
# if rating entry exists
if 'rating' in ResJSONData[i]:
for testItem in ResJSONData[i]['rating']:
if not testItem in RatingsDict[testID]:
RatingsDict[testID][testItem] = list()
RatingsDict[testID][testItem].append(ResJSONData[i]['rating'][testItem])
elif 'UserComment' in ResJSONData[i]:
print('Comment: '+ResJSONData[i]['UserName']+'<'+ResJSONData[i]['UserEmail']+'>')
print(ResJSONData[i]['UserComment'])
print('--------------------')
# plot and evaluate every single test set, output results to a csv file
plotsX = np.ceil(np.sqrt(numTests))
plotsY = np.ceil(numTests / plotsX)
plotInd = 0
for testID in sorted(RatingsDict):
# write test set results to a csv file
CsvFile = open(testID+'.csv', 'w')
CsvWriter = csv.writer(CsvFile)
testResArr = None
labels = list()
for testDataKey in sorted(RatingsDict[testID]):
testData = RatingsDict[testID][testDataKey]
CsvWriter.writerow(list(testDataKey) + testData)
npTestData = np.array(testData)
labels.append(testDataKey)
        if testResArr is None:
testResArr = npTestData
else:
testResArr = np.column_stack((testResArr, npTestData.T))
CsvFile.close()
plotInd += 1
if testResArr.shape[0] > 1:
plt.figure(0)
plt.subplot(plotsX, plotsY, plotInd)
plt.boxplot(testResArr)
plt.title(testID)
plt.xticks(range(1, len(labels)+1), labels, rotation=45)
else:
print("WARNING: not enough ratings for test " + testID + " to create a boxplot!")
plt.tight_layout()
# plot runtime of tests
runtimesArr = list()
labels = list()
for testID in sorted(RuntimesDict):
timesInSec = [x / 1000 for x in RuntimesDict[testID]]
runtimesArr.append(timesInSec)
labels.append(testID)
plt.figure(1)
plt.boxplot(runtimesArr)
plt.title("Runtime per test")
plt.xticks(range(1, len(labels)+1), labels, rotation=45)
plt.ylabel("Time in sec")
plt.show()
| gpl-3.0 |
fyffyt/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. The example illustrates that although feature 2
has a strong coefficient in the full model, it conveys little
information about `y` compared to feature 1 alone.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
SpatialMetabolomics/SM_distributed | sm/engine/ion_centroids_gen.py | 2 | 5765 | import logging
from pathlib import Path
import boto3
from botocore.exceptions import ClientError
from itertools import product, repeat
from pyspark.sql import SparkSession
import pandas as pd
from sm.engine.util import SMConfig, split_s3_path
from sm.engine.isocalc_wrapper import IsocalcWrapper
logger = logging.getLogger('engine')
class IonCentroidsGenerator(object):
""" Generator of theoretical isotope peaks for all molecules in a database.
Args
----------
sc : pyspark.SparkContext
moldb_name : str
isocalc: IsocalcWrapper
"""
def __init__(self, sc, moldb_name, isocalc):
self._sc = sc
self._moldb_name = moldb_name
self._isocalc = isocalc
self._sm_config = SMConfig.get_conf()
self._parquet_chunks_n = 64
self._iso_gen_part_n = 512
self._spark_session = SparkSession(self._sc)
self._ion_centroids_path = '{}/{}/{}/{}'.format(self._sm_config['isotope_storage']['path'],
self._moldb_name,
self._isocalc.sigma,
self._isocalc.charge)
self.ion_df = None
self.ion_centroids_df = None
def exists(self):
""" Check if ion centroids saved to parquet
"""
if self._ion_centroids_path.startswith('s3a://'):
cred_dict = dict(aws_access_key_id=self._sm_config['aws']['aws_access_key_id'],
aws_secret_access_key=self._sm_config['aws']['aws_secret_access_key'])
bucket, key = split_s3_path(self._ion_centroids_path)
s3 = boto3.client('s3', **cred_dict)
try:
s3.head_object(Bucket=bucket, Key=key + '/ions/_SUCCESS')
except ClientError:
return False
else:
return True
else:
return Path(self._ion_centroids_path + '/ions/_SUCCESS').exists()
def generate(self, isocalc, sfs, adducts):
""" Generate isotopic peaks
Args
---
isocalc: IsocalcWrapper
            Cannot be a class field because Spark doesn't allow passing 'self' to functions
adducts: list
"""
logger.info('Generating molecular isotopic peaks')
def calc_centroids(args):
ion_i, sf, adduct = args
mzs, ints = isocalc.ion_centroids(sf, adduct)
if mzs is not None:
return zip(repeat(ion_i),
range(0, len(mzs)),
map(float, mzs),
map(float, ints))
else:
return []
ion_df = pd.DataFrame([(i, sf, adduct) for i, (sf, adduct) in
enumerate(sorted(product(sfs, adducts)))],
columns=['ion_i', 'sf', 'adduct']).set_index('ion_i')
ion_centroids_rdd = (self._sc.parallelize(ion_df.reset_index().values,
numSlices=self._iso_gen_part_n)
.flatMap(calc_centroids))
self.ion_centroids_df = (pd.DataFrame(data=ion_centroids_rdd.collect(),
columns=['ion_i', 'peak_i', 'mz', 'int'])
.sort_values(by='mz')
.set_index('ion_i'))
self.ion_df = ion_df.loc[self.ion_centroids_df.index.unique()]
# Use when pandas DataFrames get way too big
# ion_centroids_df = self._spark_session.createDataFrame(data=ion_centroids_rdd,
# schema=self.ion_centroids_df_fields)
# self.ion_centroids_df = (ion_centroids_df
# .sort(ion_centroids_df.mz.asc())
# .coalesce(self._parquet_chunks_n))
def save(self):
""" Save isotopic peaks
"""
logger.info('Saving peaks')
centr_spark_df = self._spark_session.createDataFrame(self.ion_centroids_df.reset_index())
centr_spark_df.write.parquet(self._ion_centroids_path + '/ion_centroids', mode='overwrite')
ion_spark_df = self._spark_session.createDataFrame(self.ion_df.reset_index())
ion_spark_df.write.parquet(self._ion_centroids_path + '/ions', mode='overwrite')
def restore(self):
logger.info('Restoring peaks')
self.ion_df = self._spark_session.read.parquet(
self._ion_centroids_path + '/ions').toPandas().set_index('ion_i')
self.ion_centroids_df = self._spark_session.read.parquet(
self._ion_centroids_path + '/ion_centroids').toPandas().set_index('ion_i')
def sf_adduct_centroids_df(self):
return self.ion_df.join(self.ion_centroids_df).set_index(['sf', 'adduct'])
def centroids_subset(self, ions):
""" Restore isotopic peaks dataframe only for the 'ions'
Args
---
ions: list of tuples
Returns
---
: pandas.DataFrame
"""
assert self.ion_df is not None
ion_map = self.ion_df.reset_index().set_index(['sf', 'adduct']).ion_i
ion_ids = ion_map.loc[ions].values
return self.ion_centroids_df.loc[ion_ids].sort_values(by='mz')
def generate_if_not_exist(self, isocalc, sfs, adducts):
if not self.exists():
self.generate(isocalc=isocalc, sfs=sfs, adducts=adducts)
self.save()
else:
self.restore()
def ions(self, adducts):
return (self.ion_df[self.ion_df.adduct.isin(adducts)]
.sort_values(by=['sf', 'adduct'])
.to_records(index=False))
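# --- Usage sketch (editor's addition, not part of the original module) ---
# A hedged outline of the typical call order; the SparkContext, molecular DB
# name, formulas and adducts below are placeholders, not values from this repo.
#
#     centr_gen = IonCentroidsGenerator(sc=sc, moldb_name='HMDB', isocalc=isocalc)
#     centr_gen.generate_if_not_exist(isocalc=isocalc,
#                                     sfs=['C6H12O6', 'C5H5N5'],
#                                     adducts=['+H', '+Na'])
#     centroids_df = centr_gen.centroids_subset([('C6H12O6', '+H')])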
| apache-2.0 |
mathieu1/zappa_conda | api.py | 1 | 1397 | from flask import Flask
import importlib
import pandas as pd
import yaml
import json
import subprocess
import re
app = Flask(__name__)
@app.route('/pkg_versions')
def pkg_versions():
with open('environment.yml') as f:
environment = yaml.load(f)
versions = {}
non_packages = {}
for pkg_name in environment['dependencies']:
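        # Strip any version specifier (e.g. 'numpy>=1.10' -> 'numpy') so the
        # bare package name can be imported or invoked below.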
        pkg_name = re.sub(r'[=<>][^\n]+', '', pkg_name)
try:
pkg = importlib.import_module(pkg_name)
versions[pkg_name] = pkg.__version__
except Exception as e:
try:
non_packages[pkg_name] = subprocess.check_output([pkg_name,'--version']).decode('utf-8')
except Exception as e:
try:
non_packages[pkg_name] = subprocess.check_output([pkg_name,'version']).decode('utf-8')
except Exception as e:
non_packages[pkg_name] = str(e)
return json.dumps({'package_versions':versions, 'non-python-packages':non_packages})
@app.route('/test_pandas')
def test_pandas():
df = pd.DataFrame({'a':[1,2,3,4],'b':[9,10,0,0]})
answer = {'df':df.to_dict(),
'a_sum': df['a'].sum(),
'a+b':(df['a']+df['b']).tolist(),
'sum of a grouped by b': df.groupby('b')['a'].sum().to_dict()}
return json.dumps(answer)
if __name__ == '__main__':
app.run(debug = True)
| mit |
smorton2/think-stats | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
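# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The permutation test above can be written with plain numpy: shuffle the
# series to destroy any temporal structure, recompute the lag-1 correlation,
# and count how often the shuffled statistic is at least the observed one.
#
#     import numpy as np
#
#     def serial_corr_pvalue(values, lag=1, iters=1000):
#         values = np.asarray(values, dtype=float)
#         observed = abs(np.corrcoef(values[:-lag], values[lag:])[0, 1])
#         count = 0
#         for _ in range(iters):
#             shuffled = np.random.permutation(values)
#             count += abs(np.corrcoef(shuffled[:-lag],
#                                      shuffled[lag:])[0, 1]) >= observed
#         return count / iters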
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
AustereCuriosity/astropy | astropy/visualization/wcsaxes/transforms.py | 4 | 8991 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from ... import units as u
from ...wcs import WCS
from ...wcs.utils import wcs_to_celestial_frame
from ...extern import six
from ...coordinates import (SkyCoord, frame_transform_graph,
SphericalRepresentation,
UnitSphericalRepresentation,
BaseCoordinateFrame)
@six.add_metaclass(abc.ABCMeta)
class CurvedTransform(Transform):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
@abc.abstractmethod
def transform(self, input):
raise NotImplementedError("")
@abc.abstractmethod
def inverted(self):
raise NotImplementedError("")
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
def __init__(self, wcs, slice=None):
super(WCSWorld2PixelTransform, self).__init__()
self.wcs = wcs
if self.wcs.wcs.naxis > 2:
if slice is None:
raise ValueError("WCS has more than 2 dimensions, so ``slice`` should be set")
elif len(slice) != self.wcs.wcs.naxis:
raise ValueError("slice should have as many elements as WCS "
"has dimensions (should be {0})".format(self.wcs.wcs.naxis))
else:
self.slice = slice
self.x_index = slice.index('x')
self.y_index = slice.index('y')
else:
self.slice = None
@property
def input_dims(self):
return self.wcs.wcs.naxis
def transform(self, world):
"""
Transform world to pixel coordinates. You should pass in a NxM array
where N is the number of points to transform, and M is the number of
dimensions in the WCS. This then returns the (x, y) pixel coordinates
as a Nx2 array.
"""
if world.shape[1] != self.wcs.wcs.naxis:
raise ValueError("Second dimension of input values should match number of WCS coordinates")
if world.shape[0] == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.wcs_world2pix(world, 1) - 1
if self.slice is None:
return pixel
else:
return pixel[:, (self.x_index, self.y_index)]
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, slice=self.slice)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
def __init__(self, wcs, slice=None):
super(WCSPixel2WorldTransform, self).__init__()
self.wcs = wcs
self.slice = slice
if self.slice is not None:
self.x_index = slice.index('x')
self.y_index = slice.index('y')
@property
def output_dims(self):
return self.wcs.wcs.naxis
def get_coord_slices(self, xmin, xmax, ymin, ymax, nx, ny):
"""
Get a coordinate slice
"""
x = np.linspace(xmin, xmax, nx)
y = np.linspace(ymin, ymax, ny)
Y, X = np.meshgrid(y, x)
pixel = np.array([X.ravel(), Y.ravel()]).transpose()
world = self.transform(pixel)
return X, Y, [world[:, i].reshape(nx, ny).transpose() for i in range(self.wcs.wcs.naxis)]
def transform(self, pixel):
"""
Transform pixel to world coordinates. You should pass in a Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions in
the WCS
"""
if self.slice is None:
pixel_full = pixel.copy()
else:
pixel_full = []
for index in self.slice:
if index == 'x':
pixel_full.append(pixel[:, 0])
elif index == 'y':
pixel_full.append(pixel[:, 1])
else:
pixel_full.append(index)
pixel_full = np.array(np.broadcast_arrays(*pixel_full)).transpose()
pixel_full += 1
if pixel_full.shape[0] == 0:
world = np.zeros((0, 2))
else:
world = self.wcs.wcs_pix2world(pixel_full, 1)
# At the moment, one has to manually check that the transformation
# round-trips, otherwise it should be considered invalid.
pixel_check = self.wcs.wcs_world2pix(world, 1)
with np.errstate(invalid='ignore'):
invalid = np.any(np.abs(pixel_check - pixel_full) > 1., axis=1)
world[invalid] = np.nan
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, slice=self.slice)
class CoordinateTransform(CurvedTransform):
def __init__(self, input_system, output_system):
super(CoordinateTransform, self).__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, WCS):
self.input_system = wcs_to_celestial_frame(self._input_system_name)
elif isinstance(self._input_system_name, six.string_types):
self.input_system = frame_transform_graph.lookup_name(self._input_system_name)
if self.input_system is None:
raise ValueError("Frame {0} not found".format(self._input_system_name))
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")
if isinstance(self._output_system_name, WCS):
self.output_system = wcs_to_celestial_frame(self._output_system_name)
elif isinstance(self._output_system_name, six.string_types):
self.output_system = frame_transform_graph.lookup_name(self._output_system_name)
if self.output_system is None:
raise ValueError("Frame {0} not found".format(self._output_system_name))
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")
if self.output_system == self.input_system:
self.same_frames = True
else:
self.same_frames = False
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(x_in, y_in, unit=(u.deg, u.deg),
frame=self.input_system)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all='ignore'):
c_out = c_in.transform_to(self.output_system)
if issubclass(c_out.representation, (SphericalRepresentation, UnitSphericalRepresentation)):
lon = c_out.data.lon.deg
lat = c_out.data.lat.deg
else:
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
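# --- Usage sketch (editor's addition, not part of the original module) ---
# A hedged illustration of CoordinateTransform used on its own: frame names
# are resolved via frame_transform_graph, and transform() expects an Nx2
# array of (lon, lat) in degrees. The coordinates below are approximate.
#
#     import numpy as np
#     from astropy.visualization.wcsaxes.transforms import CoordinateTransform
#
#     trans = CoordinateTransform('fk5', 'galactic')
#     world_fk5 = np.array([[10.68, 41.27],     # roughly M31 (ra, dec)
#                           [266.42, -29.01]])  # roughly Sgr A* (ra, dec)
#     world_gal = trans.transform(world_fk5)    # Nx2 array of (l, b) in deg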
| bsd-3-clause |
belltailjp/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/tri/trirefine.py | 20 | 14567 | """
Mesh refinement for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
import matplotlib.tri.triinterpolate
class TriRefiner(object):
"""
Abstract base class for classes implementing mesh refinement.
A TriRefiner encapsulates a Triangulation object and provides tools for
mesh refinement and interpolation.
Derived classes must implements:
- ``refine_triangulation(return_tri_index=False, **kwargs)`` , where
the optional keyword arguments *kwargs* are defined in each
TriRefiner concrete implementation, and which returns :
- a refined triangulation
- optionally (depending on *return_tri_index*), for each
point of the refined triangulation: the index of
the initial triangulation triangle to which it belongs.
- ``refine_field(z, triinterpolator=None, **kwargs)`` , where:
- *z* array of field values (to refine) defined at the base
triangulation nodes
- *triinterpolator* is a
:class:`~matplotlib.tri.TriInterpolator` (optional)
- the other optional keyword arguments *kwargs* are defined in
each TriRefiner concrete implementation
and which returns (as a tuple) a refined triangular mesh and the
interpolated values of the field at the refined triangulation nodes.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
class UniformTriRefiner(TriRefiner):
"""
Uniform mesh refinement by recursive subdivisions.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation`
The encapsulated triangulation (to be refined)
"""
# See Also
# --------
# :class:`~matplotlib.tri.CubicTriInterpolator` and
# :class:`~matplotlib.tri.TriAnalyzer`.
# """
def __init__(self, triangulation):
TriRefiner.__init__(self, triangulation)
def refine_triangulation(self, return_tri_index=False, subdiv=3):
"""
        Computes a uniformly refined triangulation *refi_triangulation* of
the encapsulated :attr:`triangulation`.
This function refines the encapsulated triangulation by splitting each
father triangle into 4 child sub-triangles built on the edges midside
nodes, recursively (level of recursion *subdiv*).
In the end, each triangle is hence divided into ``4**subdiv``
child triangles.
The default value for *subdiv* is 3 resulting in 64 refined
subtriangles for each triangle of the initial triangulation.
Parameters
----------
return_tri_index : boolean, optional
Boolean indicating whether an index table indicating the father
triangle index of each point will be returned. Default value
False.
subdiv : integer, optional
Recursion level for the subdivision. Defaults value 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_triangulation : :class:`~matplotlib.tri.Triangulation`
The returned refined triangulation
found_index : array-like of integers
Index of the initial triangulation containing triangle, for each
point of *refi_triangulation*.
Returned only if *return_tri_index* is set to True.
"""
refi_triangulation = self._triangulation
ntri = refi_triangulation.triangles.shape[0]
# Computes the triangulation ancestors numbers in the reference
# triangulation.
ancestors = np.arange(ntri, dtype=np.int32)
for _ in range(subdiv):
refi_triangulation, ancestors = self._refine_triangulation_once(
refi_triangulation, ancestors)
refi_npts = refi_triangulation.x.shape[0]
refi_triangles = refi_triangulation.triangles
# Now we compute found_index table if needed
if return_tri_index:
# We have to initialize found_index with -1 because some nodes
# may very well belong to no triangle at all, e.g., in case of
# Delaunay Triangulation with DuplicatePointWarning.
found_index = - np.ones(refi_npts, dtype=np.int32)
tri_mask = self._triangulation.mask
if tri_mask is None:
found_index[refi_triangles] = np.repeat(ancestors,
3).reshape(-1, 3)
else:
                # There is a subtlety here: we want to avoid, whenever
                # possible, that a refined point's containing triangle is a
                # masked triangle (which would result in artifacts in plots).
# So we impose the numbering from masked ancestors first,
# then overwrite it with unmasked ancestor numbers.
ancestor_mask = tri_mask[ancestors]
found_index[refi_triangles[ancestor_mask, :]
] = np.repeat(ancestors[ancestor_mask],
3).reshape(-1, 3)
found_index[refi_triangles[~ancestor_mask, :]
] = np.repeat(ancestors[~ancestor_mask],
3).reshape(-1, 3)
return refi_triangulation, found_index
else:
return refi_triangulation
def refine_field(self, z, triinterpolator=None, subdiv=3):
"""
Refines a field defined on the encapsulated triangulation.
Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
values of the field at the node of the refined triangulation).
Parameters
----------
z : 1d-array-like of length ``n_points``
Values of the field to refine, defined at the nodes of the
encapsulated triangulation. (``n_points`` is the number of points
in the initial triangulation)
triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
Interpolator used for field interpolation. If not specified,
a :class:`~matplotlib.tri.CubicTriInterpolator` will
be used.
subdiv : integer, optional
Recursion level for the subdivision. Defaults to 3.
Each triangle will be divided into ``4**subdiv`` child triangles.
Returns
-------
refi_tri : :class:`~matplotlib.tri.Triangulation` object
The returned refined triangulation
refi_z : 1d array of length: *refi_tri* node count.
The returned interpolated field (at *refi_tri* nodes)
Examples
--------
The main application of this method is to plot high-quality
iso-contours on a coarse triangular grid (e.g., triangulation built
from relatively sparse test data):
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py
"""
if triinterpolator is None:
interp = matplotlib.tri.CubicTriInterpolator(
self._triangulation, z)
else:
if not isinstance(triinterpolator,
matplotlib.tri.TriInterpolator):
raise ValueError("Expected a TriInterpolator object")
interp = triinterpolator
refi_tri, found_index = self.refine_triangulation(
subdiv=subdiv, return_tri_index=True)
refi_z = interp._interpolate_multikeys(
refi_tri.x, refi_tri.y, tri_index=found_index)[0]
return refi_tri, refi_z
@staticmethod
def _refine_triangulation_once(triangulation, ancestors=None):
"""
This function refines a matplotlib.tri *triangulation* by splitting
each triangle into 4 child-masked_triangles built on the edges midside
nodes.
        The masked triangles, if present, are also split, but their children
        are returned masked.
If *ancestors* is not provided, returns only a new triangulation:
child_triangulation.
        If the array-like key table *ancestors* is given, it shall be of shape
(ntri,) where ntri is the number of *triangulation* masked_triangles.
In this case, the function returns
(child_triangulation, child_ancestors)
child_ancestors is defined so that the 4 child masked_triangles share
the same index as their father: child_ancestors.shape = (4 * ntri,).
"""
x = triangulation.x
y = triangulation.y
# According to tri.triangulation doc:
# neighbors[i,j] is the triangle that is the neighbor
# to the edge from point index masked_triangles[i,j] to point
# index masked_triangles[i,(j+1)%3].
neighbors = triangulation.neighbors
triangles = triangulation.triangles
npts = np.shape(x)[0]
ntri = np.shape(triangles)[0]
if ancestors is not None:
ancestors = np.asarray(ancestors)
if np.shape(ancestors) != (ntri,):
raise ValueError(
"Incompatible shapes provide for triangulation"
".masked_triangles and ancestors: {0} and {1}".format(
np.shape(triangles), np.shape(ancestors)))
# Initiating tables refi_x and refi_y of the refined triangulation
# points
        # hint: each edge midside node is shared by 2 triangles, except on
        # border edges.
borders = np.sum(neighbors == -1)
added_pts = (3*ntri + borders) // 2
refi_npts = npts + added_pts
refi_x = np.zeros(refi_npts)
refi_y = np.zeros(refi_npts)
# First part of refi_x, refi_y is just the initial points
refi_x[:npts] = x
refi_y[:npts] = y
# Second part contains the edge midside nodes.
# Each edge belongs to 1 triangle (if border edge) or is shared by 2
# masked_triangles (interior edge).
# We first build 2 * ntri arrays of edge starting nodes (edge_elems,
# edge_apexes) ; we then extract only the masters to avoid overlaps.
        # The so-called 'master' is the triangle with the biggest index.
        # The 'slave' is the triangle with the lower index
        # (can be -1 for a border edge).
# For slave and master we will identify the apex pointing to the edge
# start
edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32),
np.arange(ntri, dtype=np.int32)]))
edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32),
np.ones(ntri, dtype=np.int32)*2]))
edge_neighbors = neighbors[edge_elems, edge_apexes]
mask_masters = (edge_elems > edge_neighbors)
# Identifying the "masters" and adding to refi_x, refi_y vec
masters = edge_elems[mask_masters]
apex_masters = edge_apexes[mask_masters]
x_add = (x[triangles[masters, apex_masters]] +
x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
y_add = (y[triangles[masters, apex_masters]] +
y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
refi_x[npts:] = x_add
refi_y[npts:] = y_add
# Building the new masked_triangles ; each old masked_triangles hosts
# 4 new masked_triangles
# there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
# 3 new_pt_midside
new_pt_corner = triangles
        # What is the index in refi_x, refi_y of the point at the middle of
        # apex iapex of elem ielem?
        # If ielem is the apex master: simple count, given the way refi_x was
        # built.
        # If ielem is the apex slave: we do not know yet; but we will soon,
        # using the neighbors table.
new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
cum_sum = npts
for imid in range(3):
mask_st_loc = (imid == apex_masters)
n_masters_loc = np.sum(mask_st_loc)
elem_masters_loc = masters[mask_st_loc]
new_pt_midside[:, imid][elem_masters_loc] = np.arange(
n_masters_loc, dtype=np.int32) + cum_sum
cum_sum += n_masters_loc
# Now dealing with slave elems.
        # For each slave element we identify the master and then the inode.
        # Once slave_masters is identified, slave_masters_apex is such that:
# neighbors[slaves_masters, slave_masters_apex] == slaves
mask_slaves = np.logical_not(mask_masters)
slaves = edge_elems[mask_slaves]
slaves_masters = edge_neighbors[mask_slaves]
diff_table = np.abs(neighbors[slaves_masters, :] -
np.outer(slaves, np.ones(3, dtype=np.int32)))
slave_masters_apex = np.argmin(diff_table, axis=1)
slaves_apex = edge_apexes[mask_slaves]
new_pt_midside[slaves, slaves_apex] = new_pt_midside[
slaves_masters, slave_masters_apex]
# Builds the 4 child masked_triangles
child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
child_triangles[0::4, :] = np.vstack([
new_pt_corner[:, 0], new_pt_midside[:, 0],
new_pt_midside[:, 2]]).T
child_triangles[1::4, :] = np.vstack([
new_pt_corner[:, 1], new_pt_midside[:, 1],
new_pt_midside[:, 0]]).T
child_triangles[2::4, :] = np.vstack([
new_pt_corner[:, 2], new_pt_midside[:, 2],
new_pt_midside[:, 1]]).T
child_triangles[3::4, :] = np.vstack([
new_pt_midside[:, 0], new_pt_midside[:, 1],
new_pt_midside[:, 2]]).T
child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
# Builds the child mask
if triangulation.mask is not None:
child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
if ancestors is None:
return child_triangulation
else:
return child_triangulation, np.repeat(ancestors, 4)
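# Illustrative usage sketch (not part of the upstream matplotlib module): a
# minimal refinement example relying only on numpy (already imported as np)
# and the classes defined above.
if __name__ == "__main__":
    # Two triangles forming the unit square.
    demo_x = np.array([0.0, 1.0, 1.0, 0.0])
    demo_y = np.array([0.0, 0.0, 1.0, 1.0])
    demo_triangles = np.array([[0, 1, 2], [0, 2, 3]])
    demo_tri = Triangulation(demo_x, demo_y, demo_triangles)
    refiner = UniformTriRefiner(demo_tri)
    # Each subdivision level splits every triangle into 4 children.
    refi_tri, found_index = refiner.refine_triangulation(
        return_tri_index=True, subdiv=2)
    print("refined mesh: %d points, %d triangles (expected %d triangles)"
          % (refi_tri.x.shape[0], refi_tri.triangles.shape[0], 2 * 4 ** 2))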
| gpl-3.0 |
JohanComparat/nbody-npt-functions | bin/bin_DF/test_scripts/halo-bias-fit.py | 1 | 18813 | import astropy.units as uu
import astropy.cosmology as co
aa = co.Planck13
from scipy.interpolate import interp1d
import numpy as n
import matplotlib
#matplotlib.use('pdf')
matplotlib.rcParams['font.size']=12
import matplotlib.pyplot as p
import glob
import sys
from scipy.optimize import curve_fit
import cPickle
from os.path import join
from scipy.optimize import minimize
import scipy.fftpack as f
import time
from hankel import SphericalHankelTransform
# Tinker 05 :
aa = 1/2**0.5 # NB: overwrites the Planck13 cosmology assigned to aa above
bb = 0.35
cc = 0.8
deltac = 1.68
bnu = lambda nu : 1 + aa**(-0.5)/(deltac) * ( aa**0.5*(aa*nu**2.) + aa*0.5 * bb * (aa * nu**2.)**(1-cc) - (aa*nu**2.)**cc / ((aa*nu**2.)**cc + bb*(1-cc)*(1-cc/2.) ) )
bvm = lambda vm, vcut : bnu(vm/vcut)
fun = lambda vm, a, b, c : b* (vm/c) + a*n.e**(vm**0.5/c**0.5)
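# Illustrative check of the fitting function above: at the initial guess used
# below (a=0.5, b=-0.5, c=200), fun(200., 0.5, -0.5, 200.) = -0.5 + 0.5*n.e,
# i.e. about 0.86 for a vmax = 200 km/s halo.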
xs = n.arange(60, 2000, 1)
dir = ".." #join("D:","data","MultiDark")
dir_04 = join(dir,"MD_0.4Gpc")
dir_10 = join(dir,"MD_1Gpc")
dir_25 = join(dir,"MD_2.5Gpc")
dir_40 = join(dir,"MD_4Gpc")
vmins = n.array(["65.0", "72.9", "81.8", "91.8", "103.0", "115.5", "129.6", "145.5", "163.2", "183.1", "205.5", "230.6","258.7", "290.3", "325", "365.5", "410.1", "460.1", "516.3", "579.3", "650.0", "729.3", "818.3", "918.1", "1030.1", "1155.8", "1296.9", "1455.17"])
vlow = vmins.astype('float')[:-1]
vup = vmins.astype('float')[1:]
vmean = (vup*vlow)**0.5
limits_04 = [70, 2000]
limits_10 = [200, 5000]
limits_25 = [300, 5000]
limits_40 = [500, 5000]
zmin=0
zmax=2.5
vlow, vhigh,vmean, scale, bias, biasErr, vol, aon = n.loadtxt(join("..", "clustering", "halo-bias-measurement-summary.data"), unpack=True)
############
# Fit at redshift 0
############
sel0 = (aon==1)
res, cov = curve_fit(fun, vmean[sel0], bias[sel0], p0=(0.5, -0.5, 200), sigma=biasErr[sel0], maxfev=10000000)
"""
print res
print cov.diagonal()**0.5
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
p.plot(xs, fun(xs, res[0], res[1], res[2]), 'k--',label='fit')
sel = (vol==400**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='k', label='SMD', fmt='none')
sel = (vol==1000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='b', label='MDPL', fmt='none')
sel = (vol==2500**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='r', label='BigMD', fmt='none')
sel = (vol==4000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='m', label='HMD', fmt='none')
#-cb = p.colorbar()
#cb.set_label('z')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'bias')
p.xlim((50, 1500))
p.ylim((0.6,4.5))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-z0.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='k', label='SMD', fmt='none')
sel = (vol==1000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='b', label='MDPl', fmt='none')
sel = (vol==2500**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='r', label='BigMD', fmt='none')
sel = (vol==4000**3.)&(aon==1)
p.errorbar(vmean[sel], bias[sel]/fun(vmean[sel], res[0], res[1], res[2]), yerr = biasErr[sel]/bias[sel], c='m', label='HMD', fmt='none')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'residual = data / model')
p.xlim((50, 1500))
p.ylim((0.8,1.2))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-residual-z0.png"))
p.clf()
"""
##############
# Fit at redshift trend
##############
a0, b0, c0 = res
xs = n.arange(60, 2000, 1)
a_pr = lambda zz, a1, a2 : a0 *(1+a1*zz+a2*zz**2.) # + a2 *lz**2. + a3 *lz**3. + a4 *lz**4.
b_pr = lambda zz, b1, b2 : b0 *(1+b1*zz+b2*zz**2.)# +b3*zz**3.) # + b2 *lz**2.+ b3 *lz**3.)
c_pr = lambda zz, c1, c2 : c0 *(1+c1*zz+c2*zz**2) # + b2 *lz**2.+ b3 *lz**3.)
vfG = lambda vm, zz, ps : a_pr(zz,ps[0], ps[1])*n.e**(vm**0.5/c_pr(zz,ps[4], ps[5])**0.5) + (b_pr(zz,ps[2], ps[3]))* (vm /c_pr(zz,ps[4], ps[5]))
#vfG = lambda vm, zz, ps : a_pr(zz,ps[0])*n.e**(vm**0.5/c_pr(zz,ps[3], ps[4])**0.5) + (b_pr(zz,ps[1], ps[2]))* (vm /c_pr(zz,ps[3], ps[4]))
p1=[0,0,0,0,0,0] # [0.28, -0.04, 0.7, -0.47, -0., -0.55, 0.28]
chi2fun = lambda ps : n.sum( (vfG(vmean,1/aon-1,ps) - bias)**2. / (2*biasErr)**2. )/(len(bias) - len(p1))
res2 = minimize(chi2fun, p1, method='Powell',options={'xtol': 1e-6, 'disp': True, 'maxiter' : 50000000000000})
pOpt = res2.x
cov = res2.direc # caution: Powell direction set, not a true covariance matrix
chi2perpoint = lambda ps : (vfG(vmean,1/aon-1,ps) - bias)**2. / (2*biasErr)**2.
chi2pp = chi2perpoint(pOpt)
n.savetxt(join("..","clustering","bias0-best_fit_params.txt"),n.transpose([pOpt,cov.diagonal()**0.5]))
vs = n.arange(60,1500,2)
X,Y = n.meshgrid(vs,n.arange(zmin, zmax+0.025,0.025))
Z = vfG(X,Y,pOpt)
n.savetxt(join("..","clustering","bias0-best_fit.txt"),n.transpose([n.hstack((X)), n.hstack((Y)), n.hstack((Z))]) )
###############################
#IMPLEMENT SINGLE REDSHIFT FITS TO GET THE RIGHT PARAMETRIZATION
#SAVE DEPENDENCE WITH REDSHIFT POINTS BY WRITING THEM OUT
#WRITE REDSHIFT DEPENDENCE EQUATION IN THE PAPER
#######################################################
# now plots the results of the fit
print "now plots the results of the fit"
vmax_mod, z_mod, n_mod = n.loadtxt(join("..","clustering","bias0-best_fit.txt"), unpack=True)
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(vmax_mod, n_mod, c=z_mod,s=5, marker='o',label="model", rasterized=True, vmin = zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel('bias') # slog$_{10}[ n(>M)]')
p.xlim((50, 1500))
p.ylim((0.6,4.5))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-zAll-model.png"))
p.clf()
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
p.errorbar(vmean, bias, yerr = 2*biasErr, fmt='none',elinewidth=0.5, mfc='none',ecolor='k',rasterized = True)
sc1=p.scatter(vmean, bias, c=1/aon-1,s=5, marker='o',label="model", rasterized=True, vmin = zmin, vmax = zmax)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label("redshift")
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel('bias') # slog$_{10}[ n(>M)]')
p.xlim((50, 1500))
p.ylim((0.6,4.5))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","halo-bias-zAll-data.png"))
p.clf()
print "ndof=",len(bias)-len(pOpt)
print "ndata",len(bias)
print "maxchi2distance",n.max(chi2pp)
print "Noutliers=",len((chi2pp>1.5).nonzero()[0])
p.figure(0,(6,6))
p.axes([0.17,0.17,0.75,0.75])
sc1=p.scatter(vmean, 1/aon-1, c=chi2pp,s=5, marker='o', rasterized=True, vmin = 0, vmax = 1.2)
sc1.set_edgecolor('face')
cb = p.colorbar(shrink=0.8)
cb.set_label(r"(data-model)$^2$/(err data)$^2$")
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'z') # log$_{10}[ n(>M)]')
p.xlim((50, 1500))
p.grid()
p.savefig(join("..","clustering","halo-bias-zAll-chi2pp.png"))
p.clf()
sys.exit()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
p.scatter(vmean, bias**0.5, c=1./aon-1, s=20, edgecolors='none')
p.plot(xs, fun(xs, res[0], res[1], res[2]), 'k--')
#p.plot(xs, bvm(xs, res2[0]]), 'r--')
cb = p.colorbar()
cb.set_label('z')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'bias')
p.xlim((50, 3000))
p.ylim((0.6,4.5))
#p.yscale('log')
p.xscale('log')
#gl = p.legend(loc=2,fontsize=10)
#gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","ahalo-bias.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
p.plot(vmean[aon==1], bias[aon==1]**0.5/fun(vmean[aon==1], res[0], res[1], res[2]), 'bo')
#p.plot(vmean[aon==1], bias[aon==1]**0.5/bvm(vmean[aon==1], res2[0]), 'r+')
p.plot(vmean[aon==1], 1+ biasErr[aon==1]*bias[aon==1]**(-0.5), 'k+')
p.plot(vmean[aon==1], 1- biasErr[aon==1]*bias[aon==1]**(-0.5), 'k+')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'residual = data / model')
p.xlim((50, 3000))
p.ylim((0.7,1.3))
#p.yscale('log')
p.xscale('log')
#gl = p.legend(loc=2,fontsize=10)
#gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","ahalo-bias-residual.png"))
p.clf()
print res
print cov.diagonal()**0.5 / res
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel]/n.polyval(ps, vmean[sel]), yerr = biasErr[sel]/ bias[sel], c='m', label='HMD')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max})$/model')
p.xlim((50, 3000))
p.ylim((0.9,1.1))
#p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","ahalo-bias-z0-residual-zoom.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel], bias[sel], yerr = biasErr[sel], c='m', label='HMD')
p.plot(vmean, n.polyval(ps, vmean), 'r--', lw=2,label='fit')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max}$)')
p.xlim((50, 3000))
#p.ylim((0.1,100))
p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","ahalo-bias-z0.png"))
p.clf()
sys.exit()
ps = n.polyfit(vmean**2., bias, degree)#, w = 1./(biasErr))
n.savetxt("fit-halo-bias2-vmax2.data",ps)
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel]/n.polyval(ps, vmean[sel]**2.), yerr = biasErr[sel]/ bias[sel], c='m', label='HMD')
p.xlabel(r'$V_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max})$/model')
#p.xlim((50, 3000))
p.ylim((0.7,1.3))
#p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","bhalo-bias-vm2-z0-residual.png"))
p.clf()
p.figure(0,(5,5))
p.axes([0.18,0.18,0.75,0.75])
sel = (vol==400**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='k', label='SMD')
sel = (vol==1000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='b', label='MDPl')
sel = (vol==2500**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='r', label='BigMD')
sel = (vol==4000**3.)
# print len(bias[sel])
p.errorbar(vmean[sel]**2., bias[sel], yerr = biasErr[sel], c='m', label='HMD')
p.plot(vmean**2., n.polyval(ps, vmean**2.), 'r--', lw=2,label='fit')
p.xlabel(r'$V^2_{max}$ (km/s)')
p.ylabel(r'$b^2(V_{max}$)')
#p.xlim((50, 3000))
#p.ylim((0.1,100))
p.yscale('log')
p.xscale('log')
gl = p.legend(loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","bhalo-bias-vm2-z0.png"))
p.clf()
sys.exit()
for vmin in vmins:
list44 = n.array(glob.glob(join("..","MD_1Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_050_xiR.pkl")))
list42 = n.array(glob.glob(join("..","MD_0.4Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_050_xiR.pkl")))
list41 = n.array(glob.glob(join("..","MD_2.5Gpc","halo_bias","clustering","hlist_80_vmax_"+vmin+"*rmax_140_xiR.pkl")))
list43 = n.array(glob.glob(join("..","MD_4Gpc","halo_bias","clustering","hlist_128_vmax_"+vmin+"*rmax_140_xiR.pkl")))
list40=n.hstack((list41, list42, list43, list44))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.15,0.15,0.6,0.75])
for ii in range(len(list40)):
f=open(list40[ii],'r')
bin_xi3D,xis, DR, volume, dV, pairCount, pairs, Ntotal, nD, nR, vbinsL, vbinsH = cPickle.load(f)
f.close()
if list40[ii].split('\\')[1] == "MD_0.4Gpc":
color = 'w'
volume = 400**3.
if list40[ii].split('\\')[1] == "MD_1Gpc":
color = 'b'
volume = 1000**3.
if list40[ii].split('\\')[1] == "MD_2.5Gpc":
color = 'r'
volume = 2500**3.
if list40[ii].split('\\')[1] == "MD_4Gpc":
color = 'm'
volume = 4000**3.
DR_rb = DR[::2][:-1] + DR[1::2]
dV_rb = dV[::2][:-1] + dV[1::2]
xi_rb = DR_rb*volume/(dV_rb * pairCount) -1.
rr = (bin_xi3D[1:] + bin_xi3D[:-1])/2.
rr_rb = bin_xi3D[::2][1:]
p.plot(rr_rb, rr_rb*rr_rb*xi_rb,label= list40[ii].split('\\')[1], c = color)
p.plot(Rs,Rs*Rs*xiR,'b--',label='DM linear theory')
p.xlabel('r Mpc/h')
p.ylabel(r'$r^2 \xi$(MD) (r)')
p.xlim((0,200))
p.ylim((-50,100))
p.title(str(n.round(vbinsL))+"<vmax<"+str(n.round(vbinsH))+" z=0")
#p.yscale('log')
#p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","xi-MD-"+vmin+".png"))
p.clf()
for vmin in vmins:
list44 = n.array(glob.glob(join("..","MD_1Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list42 = n.array(glob.glob(join("..","MD_0.4Gpc","halo_bias","clustering","hlist_1.00000_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list41 = n.array(glob.glob(join("..","MD_2.5Gpc","halo_bias","clustering","hlist_80_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list43 = n.array(glob.glob(join("..","MD_4Gpc","halo_bias","clustering","hlist_128_vmax_"+vmin+"*rmax_015_xiR.pkl")))
list40=n.hstack((list41, list42, list43, list44))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.15,0.15,0.6,0.75])
for ii in range(len(list40)):
f=open(list40[ii],'r')
bin_xi3D,xis, DR, volume, dV, pairCount, pairs, Ntotal, nD, nR, vbinsL, vbinsH = cPickle.load(f)
f.close()
if list40[ii].split('\\')[1] == "MD_0.4Gpc":
color = 'k'
volume = 400**3.
if list40[ii].split('\\')[1] == "MD_1Gpc":
color = 'b'
volume = 1000**3.
if list40[ii].split('\\')[1] == "MD_2.5Gpc":
color = 'r'
volume = 2500**3.
if list40[ii].split('\\')[1] == "MD_4Gpc":
color = 'm'
volume = 4000**3.
xi = DR*volume/(dV * pairCount) -1.
rr = (bin_xi3D[1:] + bin_xi3D[:-1])/2.
p.plot(rr, rr*xi,label= list40[ii].split('\\')[1], c = color)
p.plot(Rs,Rs*xiR,'b--',label='DM linear theory')
p.xlabel('r Mpc/h')
p.ylabel(r'$r \xi$(MD) (r)')
p.xlim((0.01,20))
p.ylim((1.,200))
p.title(str(n.round(vbinsL))+"<vmax<"+str(n.round(vbinsH))+" z=0")
p.yscale('log')
p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","a_1.00000","xi-lt20-MD-"+vmin+".png"))
p.clf()
sys.exit()
##########################################################3
##########################################################3
##########################################################3
# Z=1
##########################################################3
##########################################################3
##########################################################3
list40 = n.array(glob.glob(join("..","MD_1Gpc","halo_bias","clustering","hlist_1.0*_vmax_*_xiR.pkl")))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.17,0.17,0.6,0.8])
for ii in range(len(list40)):
f=open(list40[ii],'r')
bin_xi3D_04,xis_04, DR_04, volume_04, dV_04, pairCount_04, pairs_04, Ntotal_04, nD_04, nR_04, vbinsL_04, vbinsH_04 = cPickle.load(f)
f.close()
fv_04 = 1000**3. / volume_04
xi04V = fv_04*(xis_04+1)-1.
rr = (bin_xi3D_04[1:] + bin_xi3D_04[:-1])/2.
p.plot(rr,rr*rr*xi04V,label= str(n.round(vbinsL_04))+"<vmax<"+str(n.round(vbinsH_04)))
p.xlabel('r Mpc/h')
p.ylabel(r'$r^2 \xi$(BigMDPL) (r)')
p.xlim((0,200))
p.ylim((-1,200))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","xi-MDPL.png"))
p.show()
list40 = n.array(glob.glob(join("..","MD_2.5Gpc","halo_bias","clustering","hlist_80*_vmax_*_xiR.pkl")))
list40.sort()
# print list40
p.figure(0,(11,6))
p.axes([0.17,0.17,0.6,0.8])
for ii in range(len(list40))[:-3][::2]:
f=open(list40[ii],'r')
bin_xi3D_04,xis_04, DR_04, volume_04, dV_04, pairCount_04, pairs_04, Ntotal_04, nD_04, nR_04, vbinsL_04, vbinsH_04 = cPickle.load(f)
f.close()
fv_04 = 2500**3. / volume_04
xi04V = fv_04*(xis_04+1)-1.
rr = (bin_xi3D_04[1:] + bin_xi3D_04[:-1])/2.
p.plot(rr,rr*rr*xi04V,label= str(n.round(vbinsL_04))+"<vmax<"+str(n.round(vbinsH_04)))
p.xlabel('r Mpc/h')
p.ylabel(r'$r^2 \xi$(BigMDPL) (r)')
p.xlim((0,200))
p.ylim((-1,200))
#p.yscale('log')
#p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","xi-BigMDPL.png"))
p.show()
list04 = n.array(glob.glob(join("..","MD_0.4Gpc","halo_bias","clustering","hlist_1.00*_vmax_*_xiR.pkl")))
list04.sort()
# print list04
p.figure(0,(11,6))
p.axes([0.17,0.17,0.6,0.8])
for ii in range(len(list04)): #[::2]:
f=open(list04[ii],'r')
bin_xi3D_04,xis_04, DR_04, volume_04, dV_04, pairCount_04, pairs_04, Ntotal_04, nD_04, nR_04, vbinsL_04, vbinsH_04 = cPickle.load(f)
f.close()
fv_04 = 400**3. / volume_04
xi04V = fv_04*(xis_04+1)-1.
rr = (bin_xi3D_04[1:] + bin_xi3D_04[:-1])/2.
p.plot(rr,xi04V,label= str(n.round(vbinsL_04))+"<vmax<"+str(n.round(vbinsH_04)))
xrr = n.arange(0,50,0.5)
#p.plot(xrr,20*xrr**(-1.8),'k--',lw=2)
p.axvline(3)
p.axvline(7)
p.xlabel('r Mpc/h')
p.ylabel('xi(MDPL) (r)')
p.xlim((0.1,15))
p.ylim((0.1,200))
p.yscale('log')
p.xscale('log')
gl = p.legend(bbox_to_anchor=(1.05, 1), loc=2,fontsize=10)
gl.set_frame_on(False)
p.grid()
p.savefig(join("..","clustering","xi-SMDPL.png"))
p.show()
| cc0-1.0 |
sinhrks/pyopendata | pyopendata/io/tests/test_jsdmx.py | 1 | 3891 | # pylint: disable-msg=E1101,W0613,W0603
import os
import numpy as np
import pandas as pd
from pandas.compat import range
import pandas.util.testing as tm
from pyopendata.io import read_jsdmx
class TestSDMX(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
def test_tourism(self):
# OECD -> Industry and Services -> Inbound Tourism
result = read_jsdmx(os.path.join(self.dirpath, 'jsdmx', 'tourism.json'))
self.assertTrue(isinstance(result, pd.DataFrame))
exp_col = pd.MultiIndex.from_product([['Japan'],
['China', 'Hong Kong, China',
'Total international arrivals',
'Total international receipts',
'International passenger transport receipts',
'International travel receipts',
'Korea', 'Chinese Taipei', 'United States']],
names=['Country', 'Variable'])
exp_idx = pd.DatetimeIndex(['2004', '2005', '2006', '2007', '2008',
'2009', '2010', '2011', '2012'], name='Year')
values = np.array([[616, 300, 6138, 1550, 330, 1220, 1588, 1081, 760],
[653, 299, 6728, 1710, 340, 1370, 1747, 1275, 822],
[812, 352, 7334, 1330, 350, 980, 2117, 1309, 817],
[942, 432, 8347, 1460, 360, 1100, 2601, 1385, 816],
[1000, 550, 8351, 1430, 310, 1120, 2382, 1390, 768],
[1006, 450, 6790, 1170, 210, 960, 1587, 1024, 700],
[1413, 509, 8611, 1350, 190, 1160, 2440, 1268, 727],
[1043, 365, 6219, 1000, 100, 900, 1658, 994, 566],
[1430, 482, 8368, 1300, 100, 1200, 2044, 1467, 717]])
expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
tm.assert_frame_equal(result, expected)
def test_land_use(self):
# OECD -> Environment -> Resources Land Use
result = read_jsdmx(os.path.join(self.dirpath, 'jsdmx', 'land_use.json'))
self.assertTrue(isinstance(result, pd.DataFrame))
result = result.ix['2010':'2011']
exp_col = pd.MultiIndex.from_product([['Japan', 'United States'],
['Arable land and permanent crops',
'Arable and cropland % land area',
'Total area', 'Forest', 'Forest % land area',
'Land area', 'Permanent meadows and pastures',
'Meadows and pastures % land area',
'Other areas', 'Other % land area']],
names=['Country', 'Variable'])
exp_idx = pd.DatetimeIndex(['2010', '2011'], name='Year')
values = np.array([[45930, 12.601, 377950, 249790, 68.529, 364500, np.nan, np.nan,
68780, 18.87, 1624330, 17.757, 9831510, 3040220, 33.236, 9147420,
2485000, 27.166, 1997870, 21.841],
[45610, 12.513, 377955, 249878, 68.554, 364500, np.nan, np.nan,
69012, 18.933, 1627625, 17.793, 9831510, 3044048, 33.278, 9147420,
2485000, 27.166, 1990747, 21.763]])
expected = pd.DataFrame(values, index=exp_idx, columns=exp_col)
tm.assert_frame_equal(result, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
| bsd-2-clause |
google/dopamine | dopamine/colab/utils.py | 1 | 10839 | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This provides utilities for dealing with Dopamine data.
See: dopamine/common/logger.py .
"""
import itertools
import os
import pickle
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
FILE_PREFIX = 'log'
ITERATION_PREFIX = 'iteration_'
ALL_GAMES = ['AirRaid', 'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Berzerk',
'Bowling', 'Boxing', 'Breakout', 'Carnival', 'Centipede',
'ChopperCommand', 'CrazyClimber', 'DemonAttack', 'DoubleDunk',
'ElevatorAction', 'Enduro', 'FishingDerby', 'Freeway', 'Frostbite',
'Gopher', 'Gravitar', 'Hero', 'IceHockey', 'Jamesbond',
'JourneyEscape', 'Kangaroo', 'Krull', 'KungFuMaster',
'MontezumaRevenge', 'MsPacman', 'NameThisGame', 'Phoenix',
'Pitfall', 'Pong', 'Pooyan', 'PrivateEye', 'Qbert', 'Riverraid',
'RoadRunner', 'Robotank', 'Seaquest', 'Skiing', 'Solaris',
'SpaceInvaders', 'StarGunner', 'Tennis', 'TimePilot', 'Tutankham',
'UpNDown', 'Venture', 'VideoPinball', 'WizardOfWor', 'YarsRevenge',
'Zaxxon']
def load_baselines(base_dir, verbose=False):
"""Reads in the baseline experimental data from a specified base directory.
Args:
base_dir: string, base directory where to read data from.
verbose: bool, whether to print warning messages.
Returns:
A dict containing pandas DataFrames for all available agents and games.
"""
experimental_data = {}
for game in ALL_GAMES:
for agent in ['dqn', 'c51', 'rainbow', 'iqn']:
game_data_file = os.path.join(base_dir, agent, '{}.pkl'.format(game))
if not tf.io.gfile.exists(game_data_file):
if verbose:
# pylint: disable=superfluous-parens
print('Unable to load data for agent {} on game {}'.format(agent,
game))
# pylint: enable=superfluous-parens
continue
with tf.io.gfile.GFile(game_data_file, 'rb') as f:
if sys.version_info.major >= 3:
# pylint: disable=unexpected-keyword-arg
single_agent_data = pickle.load(f, encoding='latin1')
# pylint: enable=unexpected-keyword-arg
else:
single_agent_data = pickle.load(f)
single_agent_data['agent'] = agent
# The dataframe rows are all read as 'objects', which causes a
# ValueError when merging below. We cast the numerics to float64s to
# avoid this.
for field_name in single_agent_data.keys():
try:
single_agent_data[field_name] = (
single_agent_data[field_name].astype(np.float64))
except ValueError:
# This will catch any non-numerics that cannot be cast to float64.
continue
if game in experimental_data:
experimental_data[game] = experimental_data[game].merge(
single_agent_data, how='outer')
else:
experimental_data[game] = single_agent_data
return experimental_data
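# Illustrative note: load_baselines expects pickled DataFrames laid out as
# <base_dir>/<agent>/<Game>.pkl (agents: dqn, c51, rainbow, iqn). For example,
# load_baselines('/tmp/baselines') would return a dict keyed by game name,
# each value a DataFrame carrying an added 'agent' column.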
def load_statistics(log_path, iteration_number=None, verbose=True):
"""Reads in a statistics object from log_path.
Args:
log_path: string, provides the full path to the training/eval statistics.
iteration_number: The iteration number of the statistics object we want
to read. If set to None, load the latest version.
verbose: Whether to output information about the load procedure.
Returns:
data: The requested statistics object.
iteration: The corresponding iteration number.
Raises:
Exception: if data is not present.
"""
# If no iteration is specified, we'll look for the most recent.
if iteration_number is None:
iteration_number = get_latest_iteration(log_path)
log_file = '%s/%s_%d' % (log_path, FILE_PREFIX, iteration_number)
if verbose:
# pylint: disable=superfluous-parens
print('Reading statistics from: {}'.format(log_file))
# pylint: enable=superfluous-parens
with tf.io.gfile.GFile(log_file, 'rb') as f:
return pickle.load(f), iteration_number
def get_latest_file(path):
"""Return the file named 'path_[0-9]*' with the largest such number.
Args:
path: The base path (including directory and base name) to search.
Returns:
The latest file (in terms of given numbers).
"""
try:
latest_iteration = get_latest_iteration(path)
return os.path.join(path, '{}_{}'.format(FILE_PREFIX, latest_iteration))
except ValueError:
return None
def get_latest_iteration(path):
"""Return the largest iteration number corresponding to the given path.
Args:
path: The base path (including directory and base name) to search.
Returns:
The latest iteration number.
Raises:
ValueError: if there is not available log data at the given path.
"""
glob = os.path.join(path, '{}_[0-9]*'.format(FILE_PREFIX))
log_files = tf.io.gfile.glob(glob)
if not log_files:
raise ValueError('No log data found at {}'.format(path))
def extract_iteration(x):
return int(x[x.rfind('_') + 1:])
latest_iteration = max(extract_iteration(x) for x in log_files)
return latest_iteration
def summarize_data(data, summary_keys):
"""Processes log data into a per-iteration summary.
Args:
data: Dictionary loaded by load_statistics describing the data. This
dictionary has keys iteration_0, iteration_1, ... describing per-iteration
data.
summary_keys: List of per-iteration data to be summarized.
Example:
data = load_statistics(...)
summarize_data(data, ['train_episode_returns',
'eval_episode_returns'])
Returns:
A dictionary mapping each key in returns_keys to a per-iteration summary.
"""
summary = {}
latest_iteration_number = len(data.keys())
current_value = None
for key in summary_keys:
summary[key] = []
# Compute per-iteration average of the given key.
for i in range(latest_iteration_number):
iter_key = '{}{}'.format(ITERATION_PREFIX, i)
      # We allow reporting the same value multiple times when data is missing.
      # If there is no data for this iteration, use the previous iteration's.
if iter_key in data:
current_value = np.mean(data[iter_key][key])
summary[key].append(current_value)
return summary
def read_experiment(log_path,
parameter_set=None,
job_descriptor='',
iteration_number=None,
summary_keys=('train_episode_returns',
'eval_episode_returns'),
verbose=False):
"""Reads in a set of experimental results from log_path.
The provided parameter_set is an ordered_dict which
1) defines the parameters of this experiment,
2) defines the order in which they occur in the job descriptor.
The method reads all experiments of the form
${log_path}/${job_descriptor}.format(params)/logs,
where params is constructed from the cross product of the elements in
the parameter_set.
For example:
parameter_set = collections.OrderedDict([
('game', ['Asterix', 'Pong']),
('epsilon', ['0', '0.1'])
])
read_experiment('/tmp/logs', parameter_set, job_descriptor='{}_{}')
Will try to read logs from:
- /tmp/logs/Asterix_0/logs
- /tmp/logs/Asterix_0.1/logs
- /tmp/logs/Pong_0/logs
- /tmp/logs/Pong_0.1/logs
Args:
log_path: string, base path specifying where results live.
parameter_set: An ordered_dict mapping parameter names to allowable values.
job_descriptor: A job descriptor string which is used to construct the full
path for each trial within an experiment.
iteration_number: Int, if not None determines the iteration number at which
we read in results.
summary_keys: Iterable of strings, iteration statistics to summarize.
verbose: If True, print out additional information.
Returns:
A Pandas dataframe containing experimental results.
"""
keys = [] if parameter_set is None else list(parameter_set.keys())
# Extract parameter value lists, one per parameter.
ordered_values = [parameter_set[key] for key in keys]
column_names = keys + ['iteration'] + list(summary_keys)
num_parameter_settings = len([_ for _ in itertools.product(*ordered_values)])
expected_num_iterations = 200
expected_num_rows = num_parameter_settings * expected_num_iterations
# Create DataFrame with predicted number of rows.
data_frame = pd.DataFrame(index=np.arange(0, expected_num_rows),
columns=column_names)
row_index = 0
# Now take their cross product. This generates tuples of the form
# (p1, p2, p3, ...) where p1, p2, p3 are parameter values for the first,
# second, etc. parameters as ordered in value_set.
for parameter_tuple in itertools.product(*ordered_values):
if job_descriptor is not None:
name = job_descriptor.format(*parameter_tuple)
else:
# Construct name for values.
name = '-'.join([keys[i] + '_' + str(parameter_tuple[i])
for i in range(len(keys))])
experiment_path = '{}/{}/logs'.format(log_path, name)
raw_data, last_iteration = load_statistics(
experiment_path, iteration_number=iteration_number, verbose=verbose)
summary = summarize_data(raw_data, summary_keys)
for iteration in range(last_iteration + 1):
# The row contains all the parameters, the iteration, and finally the
# requested values.
row_data = (list(parameter_tuple) + [iteration] +
[summary[key][iteration] for key in summary_keys])
data_frame.loc[row_index] = row_data
row_index += 1
# The dataframe rows are all read as 'objects', which causes a
# ValueError when merging below. We cast the numerics to float64s to
# avoid this.
for field_name in data_frame.keys():
try:
data_frame[field_name] = data_frame[field_name].astype(np.float64)
except ValueError:
# This will catch any non-numerics that cannot be cast to float64.
continue
# Shed any unused rows.
return data_frame.drop(np.arange(row_index, expected_num_rows))
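# Illustrative usage sketch (not part of the upstream Dopamine module): a
# self-contained check of summarize_data on a synthetic statistics dict that
# follows the iteration_<N> key convention described above.
if __name__ == '__main__':
  fake_data = {
      'iteration_0': {'train_episode_returns': [1.0, 3.0]},
      'iteration_1': {'train_episode_returns': [5.0, 7.0]},
  }
  fake_summary = summarize_data(fake_data, ['train_episode_returns'])
  # Per-iteration means: [2.0, 6.0].
  print(fake_summary['train_episode_returns'])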
| apache-2.0 |
morrisonwudi/zipline | zipline/history/history.py | 20 | 12233 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
import pandas as pd
import re
from zipline.finance import trading
from zipline.finance.trading import with_environment
from zipline.errors import IncompatibleHistoryFrequency
def parse_freq_str(freq_str):
# TODO: Wish we were more aligned with pandas here.
num_str, unit_str = re.match('([0-9]+)([A-Za-z]+)', freq_str).groups()
return int(num_str), unit_str
class Frequency(object):
"""
Represents how the data is sampled, as specified by the algoscript
via units like "1d", "1m", etc.
Currently only two frequencies are supported, "1d" and "1m"
- "1d" provides data at daily frequency, with the latest bar aggregating
the elapsed minutes of the (incomplete) current day
- "1m" provides data at minute frequency
"""
SUPPORTED_FREQUENCIES = frozenset({'1d', '1m'})
MAX_MINUTES = {'m': 1, 'd': 390}
MAX_DAYS = {'d': 1}
def __init__(self, freq_str, data_frequency):
if freq_str not in self.SUPPORTED_FREQUENCIES:
raise ValueError(
"history frequency must be in {supported}".format(
supported=self.SUPPORTED_FREQUENCIES,
))
# The string the at the algoscript specifies.
# Hold onto to use a key for caching.
self.freq_str = freq_str
# num - The number of units of the frequency.
# unit_str - The unit type, e.g. 'd'
self.num, self.unit_str = parse_freq_str(freq_str)
self.data_frequency = data_frequency
def next_window_start(self, previous_window_close):
"""
Get the first minute of the window starting after a window that
finished on @previous_window_close.
"""
if self.unit_str == 'd':
return self.next_day_window_start(previous_window_close,
self.data_frequency)
elif self.unit_str == 'm':
return self.next_minute_window_start(previous_window_close)
@staticmethod
def next_day_window_start(previous_window_close, data_frequency='minute'):
"""
Get the next day window start after @previous_window_close. This is
defined as the first market open strictly greater than
@previous_window_close.
"""
env = trading.environment
if data_frequency == 'daily':
next_open = env.next_trading_day(previous_window_close)
else:
next_open = env.next_market_minute(previous_window_close)
return next_open
@staticmethod
def next_minute_window_start(previous_window_close):
"""
Get the next minute window start after @previous_window_close. This is
defined as the first market minute strictly greater than
@previous_window_close.
"""
env = trading.environment
return env.next_market_minute(previous_window_close)
def window_open(self, window_close):
"""
For a period ending on `window_end`, calculate the date of the first
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_open(window_close, self.num)
elif self.unit_str == 'm':
return self.minute_window_open(window_close, self.num)
def window_close(self, window_start):
"""
For a period starting on `window_start`, calculate the date of the last
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_close(window_start, self.num)
elif self.unit_str == 'm':
return self.minute_window_close(window_start, self.num)
def day_window_open(self, window_close, num_days):
"""
Get the first minute for a daily window of length @num_days with last
minute @window_close. This is calculated by searching backward until
@num_days market_closes are encountered.
"""
env = trading.environment
open_ = env.open_close_window(
window_close,
1,
offset=-(num_days - 1)
).market_open.iloc[0]
if self.data_frequency == 'daily':
open_ = pd.tslib.normalize_date(open_)
return open_
def minute_window_open(self, window_close, num_minutes):
"""
Get the first minute for a minutely window of length @num_minutes with
last minute @window_close.
        This is defined as window_close if num_minutes == 1, and otherwise as
        the (N-1)st market minute before @window_close.
"""
if num_minutes == 1:
# Short circuit this case.
return window_close
env = trading.environment
return env.market_minute_window(window_close, count=-num_minutes)[-1]
def day_window_close(self, window_start, num_days):
"""
Get the window close for a daily frequency.
        If the data_frequency is minute, then this will be the last minute of
        the last day of the window.
        If the data_frequency is daily, this will be midnight utc of the last
        day of the window.
"""
env = trading.environment
if self.data_frequency != 'daily':
return env.get_open_and_close(
env.add_trading_days(num_days - 1, window_start),
)[1]
return pd.tslib.normalize_date(
env.add_trading_days(num_days - 1, window_start),
)
def minute_window_close(self, window_start, num_minutes):
"""
Get the last minute for a minutely window of length @num_minutes with
first minute @window_start.
This is defined as window_start if num_minutes == 1, and otherwise as
the N-1st market minute after @window_start.
"""
if num_minutes == 1:
# Short circuit this case.
return window_start
env = trading.environment
return env.market_minute_window(window_start, count=num_minutes)[-1]
@with_environment()
def prev_bar(self, dt, env=None):
"""
Returns the previous bar for dt.
"""
if self.unit_str == 'd':
if self.data_frequency == 'minute':
def func(dt):
return env.get_open_and_close(
env.previous_trading_day(dt))[1]
else:
func = env.previous_trading_day
else:
func = env.previous_market_minute
# Cache the function dispatch.
self.prev_bar = func
return func(dt)
@property
def max_bars(self):
if self.data_frequency == 'daily':
return self.max_days
else:
return self.max_minutes
@property
def max_days(self):
if self.data_frequency != 'daily':
raise ValueError('max_days requested in minute mode')
return self.MAX_DAYS[self.unit_str] * self.num
@property
def max_minutes(self):
"""
The maximum number of minutes required to roll a bar at this frequency.
"""
if self.data_frequency != 'minute':
raise ValueError('max_minutes requested in daily mode')
return self.MAX_MINUTES[self.unit_str] * self.num
def normalize(self, dt):
if self.data_frequency != 'daily':
return dt
return pd.tslib.normalize_date(dt)
def __eq__(self, other):
return self.freq_str == other.freq_str
def __hash__(self):
return hash(self.freq_str)
def __repr__(self):
return ''.join([str(self.__class__.__name__),
"('", self.freq_str, "')"])
class HistorySpec(object):
"""
    Maps to the parameters of the history() call made by the algoscript.
An object is used here so that get_history calls are not constantly
parsing the parameters and provides values for caching and indexing into
result frames.
"""
FORWARD_FILLABLE = frozenset({'price'})
@classmethod
def spec_key(cls, bar_count, freq_str, field, ffill):
"""
Used as a hash/key value for the HistorySpec.
"""
return "{0}:{1}:{2}:{3}".format(
bar_count, freq_str, field, ffill)
def __init__(self, bar_count, frequency, field, ffill,
data_frequency='daily'):
# Number of bars to look back.
self.bar_count = bar_count
if isinstance(frequency, str):
frequency = Frequency(frequency, data_frequency)
if frequency.unit_str == 'm' and data_frequency == 'daily':
raise IncompatibleHistoryFrequency(
frequency=frequency.unit_str,
data_frequency=data_frequency,
)
# The frequency at which the data is sampled.
self.frequency = frequency
# The field, e.g. 'price', 'volume', etc.
self.field = field
# Whether or not to forward fill nan data. Only has an effect if this
# spec's field is in FORWARD_FILLABLE.
self._ffill = ffill
# Calculate the cache key string once.
self.key_str = self.spec_key(
bar_count, frequency.freq_str, field, ffill)
@property
def ffill(self):
"""
Wrapper around self._ffill that returns False for fields which are not
forward-fillable.
"""
return self._ffill and self.field in self.FORWARD_FILLABLE
def __repr__(self):
return ''.join([self.__class__.__name__, "('", self.key_str, "')"])
def days_index_at_dt(history_spec, algo_dt):
"""
Get the index of a frame to be used for a get_history call with daily
frequency.
"""
env = trading.environment
# Get the previous (bar_count - 1) days' worth of market closes.
day_delta = (history_spec.bar_count - 1) * history_spec.frequency.num
market_closes = env.open_close_window(
algo_dt,
day_delta,
offset=(-day_delta),
step=history_spec.frequency.num,
).market_close
if history_spec.frequency.data_frequency == 'daily':
market_closes = market_closes.apply(pd.tslib.normalize_date)
# Append the current algo_dt as the last index value.
# Using the 'rawer' numpy array values here because of a bottleneck
# that appeared when using DatetimeIndex
return np.append(market_closes.values, algo_dt)
def minutes_index_at_dt(history_spec, algo_dt):
"""
Get the index of a frame to be used for a get_history_call with minutely
frequency.
"""
# TODO: This is almost certainly going to be too slow for production.
env = trading.environment
return env.market_minute_window(
algo_dt,
history_spec.bar_count,
step=-1,
)[::-1]
def index_at_dt(history_spec, algo_dt):
"""
Returns index of a frame returned by get_history() with the given
history_spec and algo_dt.
The resulting index will have @history_spec.bar_count bars, increasing in
units of @history_spec.frequency, terminating at the given @algo_dt.
Note: The last bar of the returned frame represents an as-of-yet incomplete
time window, so the delta between the last and second-to-last bars is
    usually less than `@history_spec.frequency` for frequencies greater
than 1m.
"""
frequency = history_spec.frequency
if frequency.unit_str == 'd':
return days_index_at_dt(history_spec, algo_dt)
elif frequency.unit_str == 'm':
return minutes_index_at_dt(history_spec, algo_dt)
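# Illustrative usage sketch (not part of the upstream zipline module): shows
# how frequency strings are parsed and wrapped. Anything touching
# trading.environment (window opens/closes) requires a configured trading
# environment and is deliberately not exercised here.
if __name__ == '__main__':
    print(parse_freq_str('1d')) # (1, 'd')
    daily_freq = Frequency('1d', data_frequency='daily')
    print(daily_freq.max_days) # 1
    spec = HistorySpec(10, '1d', 'price', True, data_frequency='daily')
    print(spec.key_str) # 10:1d:price:True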
| apache-2.0 |
gclenaghan/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
jereze/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired by numpy 1.7 with an alteration to check that
# the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
soylentdeen/BlurryApple | Tools/DM/invert.py | 1 | 2728 | import scipy
import numpy
import pyfits
from scipy.linalg import *
import matplotlib.pyplot as pyplot
import inversion
import VLTTools
hostname = "aortc3"
username = "spacimgr"
aortc= VLTTools.VLTConnection(hostname=hostname, username = username)
#"""
def pseudoInverse(filename, numFilteredModes=50):
A = scipy.matrix(pyfits.getdata(filename))
dims = A.shape
U,S,V = svd(A)
D = 1.0/(S[0:-numFilteredModes])
#S[-numFilteredModes+1:-1] = 0.0
S[-numFilteredModes:] = 0.0
newS = numpy.zeros([dims[0], dims[1]])
I = [i for i in range(dims[1])]
for i in range(len(D)):
newS[i][i] = D[i]
#S = newS.copy()
retval = scipy.matrix(V.T.dot(newS.T.dot(U.T)), dtype=numpy.float32)
singular_values = newS.diagonal()
svs = singular_values[singular_values.nonzero()[0]]
result = A.dot(retval)
return retval, numpy.max(svs)/numpy.min(svs), result
#"""
datadir = './data/'
HODM_IMdf, TTM_IMdf = aortc.get_InteractionMatrices()
#HODM_IMdf = datadir+'IM_9Dec2014.fits'
#HODM_IMdf = 'HODM_Calibration_150813.fits'
#HODM_IMdf = 'HO_IM_1021.fits'
HODM_CMdf = 'HODM_CM'
#TTM_IMdf = datadir+'TTRecnCalibrat.RESULT_IM.fits'
#TTM_IMdf = datadir+'TT_IM.fits'
TTM_CMdf = 'TTM_CM.fits'
A = scipy.matrix(pyfits.getdata(datadir+TTM_IMdf)).getI()
cns = []
fmodes = []
inv, cn, junk = pseudoInverse(datadir+HODM_IMdf, 10)
#f = pyplot.figure(0)
#f.clear()
#ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
#ax.set_yscale('log')
#ax.imshow(junk)
#f.show()
#f.savefig('ConditionNumbers.png')
#print asdf
CM = numpy.resize(inv, (62, 136))
CM[-2] = A[0]
CM[-1] = A[1]
pyfits.writeto('data/'+HODM_CMdf+'.fits', CM, clobber=True)
aortc.set_CommandMatrix(CM)
raise SystemExit # deliberate stop: the code below is exploratory scratch work
f = pyplot.figure(0)
f.clear()
ax = f.add_axes([0.1, 0.1, 0.8, 0.8])
#ax.set_yscale('log')
ax.plot(fmodes, cns)
ax.set_xlabel('Number of Filtered Modes')
ax.set_ylabel('Condition Number')
#f.show()
#f.savefig('ConditionNumbers.png')
#"""
for i in [10, 20, 30, 40]:
inv, cn, junk = pseudoInverse(datadir+HODM_IMdf, i+1)
fmodes.append(i+1)
cns.append(cn)
#CM = inv
CM = numpy.resize(inv, (62, 136))
#CM = numpy.zeros((62, 136), dtype='float32')
#CM[-2] = numpy.zeros(136, dtype='float32')
#CM[-1] = numpy.zeros(136, dtype='float32')
CM[-2] = A[0]
CM[-1] = A[1]
pyfits.writeto('data/'+HODM_CMdf+str(i)+'.fits', CM, clobber=True)
#pyfits.writeto('Output/ident_'+str(i)+'.fits', junk.T, clobber=True)
zeros = numpy.zeros(CM.shape, dtype=numpy.float32)
pyfits.writeto("Output/Zeros.fits", zeros, clobber=True)
addone = zeros.copy()
addone[9][37] = 1.0
pyfits.writeto("Output/test.fits", addone, clobber=True)
print 'done!'
#"""
#pyfits.writeto(TTM_CMdf, A, clobber=True)
| gpl-2.0 |
hanfang/glmnet_python | test/example_mgaussian.py | 2 | 1404 | # Import relevant modules and setup for calling glmnet
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy
import importlib
import matplotlib.pyplot as plt
import time
import glmnet
from glmnetPlot import glmnetPlot
import glmnetPrint
import glmnetCoef
import glmnetPredict
import cvglmnet
import cvglmnetCoef
import cvglmnetPlot
import cvglmnetPredict
importlib.reload(glmnet)
#importlib.reload(glmnetPlot)
importlib.reload(glmnetPrint)
importlib.reload(glmnetCoef)
importlib.reload(glmnetPredict)
importlib.reload(cvglmnet)
importlib.reload(cvglmnetCoef)
importlib.reload(cvglmnetPlot)
importlib.reload(cvglmnetPredict)
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleY.dat', dtype = scipy.float64, delimiter = ',')
# call glmnet
mfit = glmnet.glmnet(x = x.copy(), y = y.copy(), family = 'mgaussian')
plt.figure()
glmnetPlot(mfit, xvar = 'lambda', label = True, ptype = '2norm')
f = glmnetPredict.glmnetPredict(mfit, x[0:5,:], s = scipy.float64([0.1, 0.01]))
print(f[:,:,0])
print(f[:,:,1])
plt.figure()
t = time.time()
cvmfit = cvglmnet.cvglmnet(x = x.copy(), y = y.copy(), family = "mgaussian", parallel = True)
e = time.time() - t
print('time elapsed = ', e)
cvglmnetPlot.cvglmnetPlot(cvmfit)
| gpl-2.0 |
cxcsds/ciao-contrib | dax/dax_plot_utils.py | 1 | 5314 | #
# Copyright (C) 2020
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Emulate the sherpa plot commands using blt via ds9
sherpa has various plot commands to plot data, models, residuals, etc.
dax needs to emulate these plots but instead of using sherpa plotting
backends (eg matplotlib), we are using the BLT plotting available
through ds9. This way dax doesn't have to spawn background processes
and keep track of things running in the background to cleanup/etc.
"""
import subprocess
__all__ = ( "blt_plot_data", "blt_plot_model", "blt_plot_delchisqr" )
def xpa_plot_cmd( access_point, command ):
"""Wrapper around xpaset for plot commands"""
cc = ["xpaset", "-p", access_point, "plot" ]
cc.extend( command.split(' '))
xpa = subprocess.Popen(cc)
xpa.communicate()
def blt_plot_data(access_point,xx, ex, yy, ey):
"""Plot the data"""
cmd = ["xpaset", access_point, "plot"]
cmd.extend( ["data", "xyey"] )
# Plot the data
xpa = subprocess.Popen( cmd, stdin=subprocess.PIPE )
for vv in zip(xx, yy, ey):
pair = " ".join( [str(x) for x in vv])+"\n"
pb = pair.encode()
xpa.stdin.write(pb)
xpa.communicate()
make_pretty(access_point)
xpa_plot_cmd(access_point, "legend yes")
xpa_plot_cmd(access_point, "legend position right")
def blt_plot_model(access_point,x_vals, y_vals, title, x_label, y_label,
new=True, winname="dax", step=True):
"""Plot the model"""
if not new:
cmd = xpa_plot_cmd(access_point, "{} close".format(winname))
cmd = ["xpaset", access_point, "plot", "new"]
cmd.extend( ["name", winname, "line",
"{{{0}}}".format(title),
"{{{0} }}".format(x_label),
"{{{0} }}".format(y_label),
"xy"
] )
# ~ else:
# ~ xpa_plot_cmd( access_point, "layout grid")
# ~ xpa_plot_cmd(access_point, winname+" delete dataset")
# ~ xpa_plot_cmd(access_point, winname+" delete graph")
# ~ xpa_plot_cmd(access_point, winname+" delete dataset")
# ~ xpa_plot_cmd(access_point, winname+" delete dataset")
# ~ cmd = ["xpaset", access_point, "plot", "data", "xy"]
xpa = subprocess.Popen( cmd, stdin=subprocess.PIPE )
for x,y in zip(x_vals, y_vals):
pair = "{} {}\n".format(x,y)
pb = pair.encode()
xpa.stdin.write(pb)
xpa.communicate()
xpa_plot_cmd(access_point, "shape none")
xpa_plot_cmd(access_point, "shape fill no")
xpa_plot_cmd(access_point, "color orange")
xpa_plot_cmd(access_point, "shape color orange")
xpa_plot_cmd(access_point, "width 2")
if step:
xpa_plot_cmd(access_point, "smooth step")
xpa_plot_cmd(access_point, "name Model")
def blt_plot_delchisqr(access_point,xx, ex, yy, ey, y_label):
"""Plot the residuals"""
# This requires ds9 v8.1
xpa_plot_cmd( access_point, "add graph line")
xpa_plot_cmd( access_point, "layout strip")
# Add line through 0
x0 = [xx[0]-ex[0], xx[-1]+ex[-1]]
y0 = [0, 0]
cmd = ["xpaset", access_point, "plot", "data", "xy"]
xpa = subprocess.Popen( cmd, stdin=subprocess.PIPE )
for vv in zip(x0, y0):
pair = " ".join( [str(x) for x in vv])+"\n"
pb = pair.encode()
xpa.stdin.write(pb)
xpa.communicate()
xpa_plot_cmd(access_point, "shape none")
xpa_plot_cmd(access_point, "shape fill no")
xpa_plot_cmd(access_point, "color grey")
xpa_plot_cmd(access_point, "name zero")
xpa_plot_cmd(access_point, "width 1")
xpa_plot_cmd(access_point, "dash yes")
# Plot the data
cmd = ["xpaset", access_point, "plot", "data", "xyexey"]
xpa = subprocess.Popen( cmd, stdin=subprocess.PIPE )
for vv in zip(xx, yy, ex, ey):
pair = " ".join( [str(x) for x in vv])+"\n"
pb = pair.encode()
xpa.stdin.write(pb)
xpa.communicate()
make_pretty(access_point)
xpa_plot_cmd( access_point, "title y {delta chisqr}")
xpa_plot_cmd( access_point, "name {delchi}")
def make_pretty(access_point):
"""make pretty plots"""
xpa_plot_cmd(access_point, "shape circle")
xpa_plot_cmd(access_point, "shape fill yes")
xpa_plot_cmd(access_point, "shape color cornflowerblue")
xpa_plot_cmd(access_point, "error color cornflowerblue")
xpa_plot_cmd(access_point, "width 0")
xpa_plot_cmd(access_point, "name {Data }")
xpa_plot_cmd(access_point, "axis x grid no")
xpa_plot_cmd(access_point, "axis y grid no")
| gpl-3.0 |
intelligent-agent/redeem | redeem/BedCompensation.py | 1 | 8757 | """
Author: Elias Bakken
email: elias(dot)bakken(at)gmail(dot)com
Website: http://www.thing-printer.com
License: GNU GPL v3: http://www.gnu.org/copyleft/gpl.html
Redeem is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redeem is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Redeem. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import copy
class BedCompensation:
@staticmethod
def create_rotation_matrix(probe_points, probe_heights):
probe_points = copy.deepcopy(probe_points)
""" http://math.stackexchange.com/a/476311 """
if len(probe_points) == 3:
P0 = np.array(
[probe_points[0]["X"] / 1000.0, probe_points[0]["Y"] / 1000.0, probe_heights[0] / 1000.0])
P1 = np.array(
[probe_points[1]["X"] / 1000.0, probe_points[1]["Y"] / 1000.0, probe_heights[1] / 1000.0])
P2 = np.array(
[probe_points[2]["X"] / 1000.0, probe_points[2]["Y"] / 1000.0, probe_heights[2] / 1000.0])
else:
# Add Z (height) to the probe points
for k, v in enumerate(probe_points):
probe_points[k]["X"] /= 1000.0
probe_points[k]["Y"] /= 1000.0
probe_points[k]["Z"] = probe_heights[k] / 1000.0
(P0, P1, P2) = BedCompensation.create_plane_from_points(probe_points)
# calculate the bed normal vector
P10 = BedCompensation.normalize(P0 - P1)
P21 = BedCompensation.normalize(P2 - P1)
bed_normal = BedCompensation.normalize(np.cross(P10, P21))
# calculate a normal vector in world space in the same direction as the bed normal
ideal_normal = np.array([0.0, 0.0, np.sign(bed_normal[2])])
# calculate the rotation matrix that will align the ideal normal
# with the bed normal
v = np.cross(bed_normal, ideal_normal)
c = np.dot(bed_normal, ideal_normal)
s = np.linalg.norm(v)
ssc = np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])
    R = np.eye(3) + ssc + ssc.dot(ssc) * (1.0 - c) / (s**2)  # Rodrigues: I + [v]x + [v]x^2 * (1 - c) / s^2, with the matrix square of ssc
# check if the rotation matrix is valid, if not then return identity matrix
if np.all(np.isfinite(R)):
return R
#TODO: This makes no sense, it should be R, not R/4
return R * 0.25 + np.eye(3) * 0.75
else:
return np.eye(3)
@staticmethod
def normalize(vec):
return vec / np.linalg.norm(vec)
@staticmethod
def create_plane_from_points(points):
""" This method uses linear regression (least squares) to fit a plane
to a set of data points. This is useful if the number of probe points is > 3.
The plane is then used to sample three new points. """
x = []
y = []
z = []
for p in points:
x.append(p["X"])
y.append(p["Y"])
z.append(p["Z"])
A = np.column_stack((np.ones(len(x)), x, y))
# Solve for a least squares estimate
(coeffs, residuals, rank, sing_vals) = np.linalg.lstsq(A, z)
X = np.linspace(min(x), max(x), 3)
Y = np.linspace(min(y), max(y), 3)
X, Y = np.meshgrid(X, Y)
Z = coeffs[0] + coeffs[1] * X + coeffs[2] * Y
# Resample the probe points based on the least squares plane found.
P0 = np.array([min(x), min(y), coeffs[0] + coeffs[1] * min(x) + coeffs[2] * min(y)])
P1 = np.array([min(x), max(y), coeffs[0] + coeffs[1] * min(x) + coeffs[2] * max(y)])
P2 = np.array([(max(x) - min(x)) / 2.0,
max(y), coeffs[0] + coeffs[1] * (max(x) - min(x)) / 2.0 + coeffs[2] * max(y)])
return (P0, P1, P2)
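# A minimal numeric sketch of the least-squares plane fit used above (the
# numbers are hypothetical): with A = column_stack((ones, x, y)), lstsq solves
# z ~= c0 + c1*x + c2*y.
#   x = [0, 1, 0, 1]; y = [0, 0, 1, 1]; z = [1.0, 1.5, 2.0, 2.5]
#   A = np.column_stack((np.ones(4), x, y))
#   coeffs = np.linalg.lstsq(A, np.array(z))[0]   # -> approximately [1.0, 0.5, 1.0]
# P0, P1 and P2 are then just three samples of that fitted plane.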
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json
points = [{
"Y": 0.0,
"X": 65.0,
"Z": -1.6499999999999995
}, {
"Y": 32.5,
"X": 56.29,
"Z": -1.0625
}, {
"Y": 56.29,
"X": 32.5,
"Z": -0.56249999999999967
}, {
"Y": 65.0,
"X": 0.0,
"Z": -0.40000000000000019
}, {
"Y": 56.29,
"X": -32.5,
"Z": -0.67500000000000027
}, {
"Y": 32.5,
"X": -56.29,
"Z": -1.1875000000000002
}, {
"Y": 0.0,
"X": -65.0,
"Z": -1.7499999999999998
}, {
"Y": -32.5,
"X": -56.29,
"Z": -2.1624999999999996
}, {
"Y": -56.29,
"X": -32.5,
"Z": -2.4250000000000003
}, {
"Y": -65.0,
"X": -0.0,
"Z": -2.4375000000000004
}, {
"Y": -56.29,
"X": 32.5,
"Z": -2.3374999999999995
}, {
"Y": -32.5,
"X": 56.29,
"Z": -2.0999999999999996
}, {
"Y": 0.0,
"X": 32.5,
"Z": -1.5624999999999996
}, {
"Y": 16.25,
"X": 28.15,
"Z": -1.2624999999999997
}, {
"Y": 28.15,
"X": 16.25,
"Z": -1.0375000000000003
}, {
"Y": 32.5,
"X": 0.0,
"Z": -0.9750000000000002
}, {
"Y": 28.15,
"X": -16.25,
"Z": -1.0874999999999999
}, {
"Y": 16.25,
"X": -28.15,
"Z": -1.3499999999999996
}, {
"Y": 0.0,
"X": -32.5,
"Z": -1.6624999999999999
}, {
"Y": -16.25,
"X": -28.15,
"Z": -1.9249999999999996
}, {
"Y": -28.15,
"X": -16.25,
"Z": -2.0625
}, {
"Y": -32.5,
"X": -0.0,
"Z": -2.087499999999999
}, {
"Y": -28.15,
"X": 16.25,
"Z": -2.0
}, {
"Y": -16.25,
"X": 28.15,
"Z": -1.8250000000000002
}, {
"Y": 0.0,
"X": 0.0,
"Z": -1.575
}]
fixed = [{
"Y": 0.0,
"X": 65.0,
"Z": -1.7000000000000002
}, {
"Y": 32.5,
"X": 56.29,
"Z": -1.6249999999999998
}, {
"Y": 56.29,
"X": 32.5,
"Z": -1.4749999999999996
}, {
"Y": 65.0,
"X": 0.0,
"Z": -1.425
}, {
"Y": 56.29,
"X": -32.5,
"Z": -1.5374999999999999
}, {
"Y": 32.5,
"X": -56.29,
"Z": -1.6375000000000002
}, {
"Y": 0.0,
"X": -65.0,
"Z": -1.6874999999999998
}, {
"Y": -32.5,
"X": -56.29,
"Z": -1.5624999999999996
}, {
"Y": -56.29,
"X": -32.5,
"Z": -1.4999999999999996
}, {
"Y": -65.0,
"X": -0.0,
"Z": -1.3749999999999996
}, {
"Y": -56.29,
"X": 32.5,
"Z": -1.45
}, {
"Y": -32.5,
"X": 56.29,
"Z": -1.6249999999999998
}, {
"Y": 0.0,
"X": 32.5,
"Z": -1.575
}, {
"Y": 16.25,
"X": 28.15,
"Z": -1.5249999999999995
}, {
"Y": 28.15,
"X": 16.25,
"Z": -1.4749999999999996
}, {
"Y": 32.5,
"X": 0.0,
"Z": -1.45
}, {
"Y": 28.15,
"X": -16.25,
"Z": -1.4749999999999996
}, {
"Y": 16.25,
"X": -28.15,
"Z": -1.5374999999999999
}, {
"Y": 0.0,
"X": -32.5,
"Z": -1.5874999999999995
}, {
"Y": -16.25,
"X": -28.15,
"Z": -1.5999999999999999
}, {
"Y": -28.15,
"X": -16.25,
"Z": -1.575
}, {
"Y": -32.5,
"X": -0.0,
"Z": -1.5500000000000003
}, {
"Y": -28.15,
"X": 16.25,
"Z": -1.5374999999999999
}, {
"Y": -16.25,
"X": 28.15,
"Z": -1.5624999999999996
}, {
"Y": 0.0,
"X": 0.0,
"Z": -1.5500000000000003
}]
add = points[-1]["Z"]
x1, y1, z1 = map(list, zip(*map(lambda d: tuple(np.array([d['X'], d['Y'], d['Z']])), points)))
x3, y3, z3 = map(list, zip(*map(lambda d: tuple(np.array([d['X'], d['Y'], d['Z']])), fixed)))
z = map(lambda d: d['Z'], points)
Rn = BedCompensation.create_rotation_matrix(points, z)
x2, y2, z2 = map(list,
zip(*map(lambda d: tuple(np.array([d['X'], d['Y'], add]).dot(Rn)), points)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(x1, y1, z1, linestyle="none", marker="o", mfc="none", markeredgecolor="red")
ax.plot(x2, y2, z2, linestyle="none", marker=".", mfc="none", markeredgecolor="green")
ax.plot(x3, y3, z3, linestyle="none", marker="o", mfc="none", markeredgecolor="blue")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
print("Level bed max diff before: " + str(max(z1) - min(z1)) + " after: " +
str(max(z3) - min(z3)))
print("var matrix = " + json.dumps(Rn.tolist()) + ";")
probe = {"x": x1, "y": y1, "z": z1}
print("var probe = " + json.dumps(probe) + ";")
fixed = {"x": x3, "y": y3, "z": z3}
print("var fixed = " + json.dumps(fixed) + ";")
plt.show()
| gpl-3.0 |
manashmndl/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/io/gbq.py | 13 | 4006 | """ Google BigQuery support """
def _try_import():
# since pandas is a dependency of pandas-gbq
# we need to import on first use
try:
import pandas_gbq
except ImportError:
# give a nice error message
raise ImportError("Load data from Google BigQuery\n"
"\n"
"the pandas-gbq package is not installed\n"
"see the docs: https://pandas-gbq.readthedocs.io\n"
"\n"
"you can install via pip or conda:\n"
"pip install pandas-gbq\n"
"conda install pandas-gbq -c conda-forge\n")
return pandas_gbq
def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, verbose=True, private_key=None, dialect='legacy',
**kwargs):
r"""Load data from Google BigQuery.
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
query : str
SQL-Like Query to return data values
project_id : str
Google BigQuery Account project ID.
index_col : str (optional)
Name of result column to use for index in results DataFrame
col_order : list(str) (optional)
List of BigQuery column names in the desired order for results
DataFrame
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
verbose : boolean (default True)
Verbose output
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard. For more information
see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/sql-reference/>`__
**kwargs : Arbitrary keyword arguments
configuration (dict): query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__
Returns
-------
df: DataFrame
DataFrame representing results of query
"""
pandas_gbq = _try_import()
return pandas_gbq.read_gbq(
query, project_id=project_id,
index_col=index_col, col_order=col_order,
reauth=reauth, verbose=verbose,
private_key=private_key,
dialect=dialect,
**kwargs)
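# Hedged usage sketch (the project id, dataset/table names and SQL below are
# placeholders, not part of this module):
#   df = read_gbq("SELECT name, COUNT(*) AS n FROM [my_dataset.my_table] "
#                 "GROUP BY name", project_id="my-project")
#   to_gbq(df, "my_dataset.name_counts", project_id="my-project")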
def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
pandas_gbq = _try_import()
pandas_gbq.to_gbq(dataframe, destination_table, project_id,
chunksize=chunksize,
verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
| agpl-3.0 |
ankurankan/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
fabianp/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
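# For the defaults above, build_dataset() returns X with shape (50, 200) and y
# with shape (50,), where only the first 10 of the 200 ground-truth
# coefficients are non-zero, i.e. n_features >> n_samples and the problem is
# ill-posed unless it is regularized.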
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
shusenl/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Oleg-Krivosheev/RDP | rdp.py | 2 | 17407 | #!/usr/bin/env python
#//new algo to compute the distance
#//from the current point to the segment
#//defined by P1 and P2
#float l2 = P1.DistanceSquareToPoint(P2);
#//maybe do |l2| < epsilon
#if (l2 == 0.0f)
#{
# return this->DistanceSquareToPoint(P1);
#}
#// closest to this point position along the segment
#float t = ((r - P1.r) * (P2.r - P1.r) + (z - P1.z) * (P2.z - P1.z)) / l2;
#// clamping position along the segment to be within [0...1]
#t = std::min( std::max( t, 0.0f), 1.0f );
#// build on-segment closest tmp point and return distance to it
#return DistanceSquareToPoint( zrPoint(P1.z + t*(P2.z-P1.z), P1.r + t*(P2.r-P1.r)) );
import matplotlib.pyplot as plt
X = 0
Y = 1
def scale(s, pt):
"""
"""
return s*pt[X], s*pt[Y]
def dist_squared(p1, p2):
"""
    Compute the squared distance between two points
:param p1: first point
:param p2: second point
:return: squared distance
"""
return (p1[X] - p2[X])**2 + (p1[Y] - p2[Y])**2
def sub(p1, p2):
"""
Compute difference between points
:param p1: first point
:param p2: second point
:return: difference point
"""
return p1[X] - p2[X], p1[Y] - p2[Y]
def add(p1, p2):
"""
"""
return p1[X] + p2[X], p1[Y] + p2[Y]
def dot(p1, p2):
"""
    Dot product of two points
    :param p1: first point
    :param p2: second point
    :return: scalar dot product
"""
return p1[X] * p2[X] + p1[Y] * p2[Y]
def dist_to_segment(begin, end, curr):
"""
    returns the shortest squared distance from the point curr
to segment (begin, end)
"""
d2 = dist_squared(begin, end)
if d2 == 0.0:
return dist_squared(begin, curr)
diff = sub(end, begin)
d2beg = sub(curr, begin)
t = min(1.0, max(0.0, dot(diff, d2beg) / d2))
return dist_squared(add(begin, scale(t, diff)), curr)
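# Worked example: for the segment (0, 0)-(1, 0) and the point (2, 1), t clamps
# to 1, the closest on-segment point is (1, 0), and the returned *squared*
# distance is dist_to_segment((0, 0), (1, 0), (2, 1)) == 2.0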
def ramerdouglas(line, dist):
"""Does Ramer-Douglas-Peucker simplification of a curve with `dist`
threshold.
`line` is a list-of-tuples, where each tuple is a 2D coordinate
Usage is like so:
>>> myline = [(0.0, 0.0), (1.0, 2.0), (2.0, 1.0)]
>>> simplified = ramerdouglas(myline, dist = 1.0)
"""
if dist == 0.0:
return line[:]
if len(line) < 3:
return line
(begin, end) = (line[0], line[-1]) if line[0] != line[-1] else (line[0], line[-2])
pos = -1
maxdist = -1.0
l = len(line)
for k in range(1,l):
d2 = dist_to_segment(begin, end, line[k])
if d2 > maxdist:
maxdist = d2
pos = k - 1
if maxdist < dist ** 2:
return [begin, end]
return (ramerdouglas(line[:pos + 2], dist) +
ramerdouglas(line[pos + 1:], dist)[1:])
def to_xy(line):
    """
    Split a list of (x, y) points into separate x and y lists
    """
x = []
y = []
for pt in line:
xx, yy = pt
x.append(xx)
y.append(yy)
return (x, y)
if __name__ == "__main__":
coast = [
( 6.247872 , 11.316756 ),
( 6.338566 , 11.316756 ),
( 6.633323 , 11.205644 ),
( 6.724018 , 11.205644 ),
( 6.792039 , 11.205644 ),
( 7.154817 , 11.372311 ),
( 7.313532 , 11.400089 ),
( 7.381553 , 11.344533 ),
( 7.336206 , 11.288978 ),
( 7.200164 , 11.288978 ),
( 7.154817 , 11.261200 ),
( 7.132143 , 11.233422 ),
( 7.154817 , 11.150089 ),
( 7.268185 , 11.177867 ),
( 7.313532 , 11.122311 ),
( 7.404227 , 11.150089 ),
( 7.472248 , 11.094533 ),
( 7.767005 , 10.900089 ),
( 7.758951 , 10.864989 ),
( 7.752684 , 10.837656 ),
( 7.426900 , 10.927867 ),
( 6.519955 , 10.927867 ),
( 6.429261 , 10.900089 ),
( 6.315893 , 10.955644 ),
( 6.270545 , 10.955644 ),
( 6.247872 , 10.927867 ),
( 6.111830 , 11.011200 ),
( 6.066483 , 11.066756 ),
( 5.862420 , 11.038978 ),
( 5.817073 , 10.955644 ),
( 5.771726 , 10.900089 ),
( 5.862420 , 10.761200 ),
( 5.975788 , 10.733422 ),
( 6.157177 , 10.566756 ),
( 6.247872 , 10.511200 ),
( 6.293219 , 10.427867 ),
( 6.315893 , 10.233422 ),
( 6.315893 , 10.177867 ),
( 6.542629 , 9.844533 ),
( 6.587976 , 9.761200 ),
( 6.610650 , 9.288978 ),
( 6.542629 , 9.066756 ),
( 6.565303 , 8.900089 ),
( 6.519955 , 8.816756 ),
( 6.542629 , 8.761200 ),
( 6.565303 , 8.733422 ),
( 6.429261 , 8.427867 ),
( 6.474608 , 8.316756 ),
( 6.724018 , 8.288978 ),
( 6.882733 , 8.538978 ),
( 6.973428 , 8.594533 ),
( 6.996101 , 8.622311 ),
( 7.200164 , 8.650089 ),
( 7.290859 , 8.650089 ),
( 7.426900 , 8.483422 ),
( 7.404227 , 8.455644 ),
( 7.245511 , 8.511200 ),
( 6.996101 , 8.427867 ),
( 7.041449 , 8.372311 ),
( 7.154817 , 8.455644 ),
( 7.200164 , 8.455644 ),
( 7.245511 , 8.455644 ),
( 7.381553 , 8.316756 ),
( 7.381553 , 8.261200 ),
( 7.404227 , 8.233422 ),
( 7.494921 , 8.205644 ),
( 7.767005 , 8.288978 ),
( 7.948394 , 8.233422 ),
( 8.016415 , 8.261200 ),
( 8.197804 , 8.094533 ),
( 8.084435 , 7.816756 ),
( 8.152456 , 7.733422 ),
( 8.175130 , 7.650089 ),
( 8.175130 , 7.511200 ),
( 8.311172 , 7.427867 ),
( 8.311172 , 7.372311 ),
( 8.651276 , 7.372311 ),
( 8.923360 , 7.316756 ),
( 8.900686 , 7.261200 ),
( 8.809991 , 7.261200 ),
( 8.472735 , 7.171122 ),
( 8.333845 , 7.038978 ),
( 8.282022 , 6.981100 ),
( 8.254778 , 6.848911 ),
( 8.265824 , 6.816756 ),
( 8.239206 , 6.711211 ),
( 8.219743 , 6.612067 ),
( 8.130227 , 6.433044 ),
( 8.084435 , 6.316756 ),
( 8.107109 , 6.288978 ),
( 7.948394 , 6.177867 ),
( 7.925720 , 5.983422 ),
( 7.857699 , 5.816756 ),
( 7.835026 , 5.788978 ),
( 7.857699 , 5.511200 ),
( 7.812352 , 5.400089 ),
( 7.812352 , 5.344533 ),
( 7.812352 , 5.177867 ),
( 8.084435 , 4.733422 ),
( 8.107109 , 4.622311 ),
( 7.857699 , 4.344533 ),
( 7.630963 , 4.261200 ),
( 7.540268 , 4.177867 ),
( 7.494921 , 4.150089 ),
( 7.449574 , 4.150089 ),
( 7.404227 , 4.150089 ),
( 7.336206 , 4.094533 ),
( 7.313532 , 4.066756 ),
( 7.041449 , 4.011200 ),
( 6.905407 , 3.955644 ),
( 6.950754 , 3.900089 ),
( 7.200164 , 3.927867 ),
( 7.630963 , 3.872311 ),
( 7.721657 , 3.872311 ),
( 7.948394 , 3.788978 ),
( 7.993741 , 3.705644 ),
( 7.971067 , 3.677867 ),
( 7.925720 , 3.622311 ),
( 8.175130 , 3.705644 ),
( 8.401866 , 3.650089 ),
( 8.492561 , 3.650089 ),
( 8.605929 , 3.538978 ),
( 8.651276 , 3.566756 ),
( 8.855339 , 3.372311 ),
( 8.900686 , 3.316756 ),
( 8.900686 , 3.150089 ),
( 8.787318 , 2.900089 ),
( 8.787318 , 2.844533 ),
( 8.946033 , 2.816756 ),
( 8.991380 , 2.788978 ),
( 9.014054 , 2.705644 ),
( 8.886928 , 2.524989 ),
( 8.832665 , 2.538978 ),
( 8.809991 , 2.455644 ),
( 8.923360 , 2.538978 ),
( 9.014054 , 2.400089 ),
( 9.308811 , 2.288978 ),
( 9.399506 , 2.261200 ),
( 9.512874 , 2.122311 ),
( 9.535548 , 1.983422 ),
( 9.512874 , 1.955644 ),
( 9.467527 , 1.816756 ),
( 9.036728 , 1.816756 ),
( 8.991380 , 1.927867 ),
( 8.946033 , 1.955644 ),
( 8.900686 , 1.983422 ),
( 8.946033 , 2.122311 ),
( 8.968707 , 2.150089 ),
( 9.195443 , 1.927867 ),
( 9.354158 , 1.955644 ),
( 9.376832 , 2.038978 ),
( 9.376832 , 2.094533 ),
( 9.240790 , 2.205644 ),
( 9.195443 , 2.205644 ),
( 9.263464 , 2.150089 ),
( 9.240790 , 2.122311 ),
( 9.195443 , 2.122311 ),
( 9.104749 , 2.122311 ),
( 8.900686 , 2.316756 ),
( 8.787318 , 2.344533 ),
( 8.696623 , 2.372311 ),
( 8.651276 , 2.427867 ),
( 8.719297 , 2.455644 ),
( 8.787318 , 2.650089 ),
( 8.832665 , 2.705644 ),
( 8.605929 , 2.677867 ),
( 8.537908 , 2.788978 ),
( 8.333845 , 2.788978 ),
( 7.925720 , 2.316756 ),
( 7.925720 , 2.261200 ),
( 7.903046 , 2.233422 ),
( 7.857699 , 2.233422 ),
( 7.857699 , 2.177867 ),
( 7.789678 , 1.983422 ),
( 7.812352 , 1.788978 ),
( 7.948394 , 1.538978 ),
( 7.971067 , 1.511200 ),
( 8.129783 , 1.511200 ),
( 8.243151 , 1.594533 ),
( 8.333845 , 1.594533 ),
( 8.424540 , 1.622311 ),
( 8.515234 , 1.566756 ),
( 8.673950 , 1.400089 ),
( 8.771174 , 1.291756 ),
( 8.828938 , 1.119878 ),
( 8.762504 , 0.972544 ),
( 9.238614 , 0.759633 ),
( 9.492323 , 0.627022 ),
( 9.820891 , 0.644711 ),
( 10.376567 , 0.800622 ),
( 10.651961 , 1.085978 ),
( 10.762173 , 1.132022 ),
( 10.943045 , 1.095989 ),
( 11.256739 , 0.999878 ),
( 11.576074 , 0.761611 ),
( 11.768247 , 0.425211 ),
( 11.960165 , 0.074778 ),
( 11.953907 , 0.000000 ),
( 11.629411 , 0.258767 ),
( 11.229920 , 0.582278 ),
( 11.001633 , 0.564300 ),
( 10.868476 , 0.447478 ),
( 10.633849 , 0.541833 ),
( 10.513370 , 0.672133 ),
( 11.188700 , 0.820078 ),
( 11.194014 , 0.859656 ),
( 11.118212 , 0.905822 ),
( 10.874860 , 0.930311 ),
( 10.427319 , 0.716522 ),
( 10.023620 , 0.374211 ),
( 9.434614 , 0.360144 ),
( 8.455131 , 0.859544 ),
( 8.180481 , 0.920500 ),
( 7.902529 , 1.115078 ),
( 7.823108 , 1.269800 ),
( 7.830482 , 1.403778 ),
( 7.791937 , 1.496744 ),
( 7.767005 , 1.538978 ),
( 7.676310 , 1.622311 ),
( 7.653637 , 1.650089 ),
( 7.585616 , 1.955644 ),
( 7.562942 , 1.983422 ),
( 7.562942 , 2.233422 ),
( 7.608289 , 2.400089 ),
( 7.630963 , 2.427867 ),
( 7.608289 , 2.538978 ),
( 7.585616 , 2.566756 ),
( 7.653637 , 2.705644 ),
( 7.630963 , 2.816756 ),
( 7.336206 , 3.011200 ),
( 7.290859 , 3.011200 ),
( 7.245511 , 3.011200 ),
( 7.041449 , 2.955644 ),
( 6.928081 , 2.816756 ),
( 6.928081 , 2.733422 ),
( 6.905407 , 2.622311 ),
( 6.860060 , 2.677867 ),
( 6.814712 , 2.677867 ),
( 6.678671 , 2.677867 ),
( 6.678671 , 2.733422 ),
( 6.769365 , 2.733422 ),
( 6.814712 , 2.733422 ),
( 6.792039 , 2.788978 ),
( 6.293219 , 3.066756 ),
( 6.225198 , 3.122311 ),
( 6.202525 , 3.233422 ),
( 6.134504 , 3.344533 ),
( 5.907767 , 3.261200 ),
( 5.862420 , 3.288978 ),
( 6.043809 , 3.427867 ),
( 6.021136 , 3.483422 ),
( 5.975788 , 3.483422 ),
( 5.930441 , 3.511200 ),
( 5.953115 , 3.566756 ),
( 5.975788 , 3.594533 ),
( 5.749052 , 3.788978 ),
( 5.703705 , 3.788978 ),
( 5.635684 , 3.788978 ),
( 5.703705 , 3.844533 ),
( 5.703705 , 4.011200 ),
( 5.499642 , 4.011200 ),
( 5.862420 , 4.372311 ),
( 5.975788 , 4.427867 ),
( 6.021136 , 4.427867 ),
( 6.089156 , 4.538978 ),
( 6.111830 , 4.566756 ),
( 6.089156 , 4.650089 ),
( 5.998462 , 4.650089 ),
( 5.817073 , 4.788978 ),
( 5.771726 , 4.816756 ),
( 5.681031 , 4.816756 ),
( 5.749052 , 4.927867 ),
( 5.749052 , 5.038978 ),
( 5.839747 , 5.177867 ),
( 5.998462 , 5.233422 ),
( 6.225198 , 5.233422 ),
( 6.270545 , 5.233422 ),
( 6.383914 , 5.288978 ),
( 6.406587 , 5.372311 ),
( 6.429261 , 5.400089 ),
( 6.587976 , 5.483422 ),
( 6.670626 , 5.490000 ),
( 6.700845 , 5.564100 ),
( 6.860060 , 5.927867 ),
( 6.860060 , 6.038978 ),
( 6.950754 , 6.205644 ),
( 6.973428 , 6.316756 ),
( 7.041449 , 6.344533 ),
( 7.064122 , 6.455644 ),
( 7.116072 , 6.541989 ),
( 7.114313 , 6.603667 ),
( 7.025305 , 6.741422 ),
( 6.736924 , 6.701367 ),
( 6.641658 , 6.741467 ),
( 6.500574 , 6.761389 ),
( 6.435410 , 6.733422 ),
( 6.224291 , 6.728556 ),
( 6.191759 , 6.738989 ),
( 6.099124 , 6.755000 ),
( 6.041805 , 6.749733 ),
( 6.001672 , 6.742967 ),
( 5.905382 , 6.718300 ),
( 5.817073 , 6.677867 ),
( 5.611713 , 6.686622 ),
( 5.401366 , 6.864333 ),
( 5.386274 , 6.927867 ),
( 5.356608 , 6.981811 ),
( 5.404095 , 7.111822 ),
( 5.561958 , 7.216133 ),
( 5.660643 , 7.244722 ),
( 5.366149 , 7.489478 ),
( 5.340927 , 7.511200 ),
( 5.114998 , 7.592867 ),
( 4.870667 , 7.692033 ),
( 4.746560 , 7.781856 ),
( 4.708060 , 7.760867 ),
( 4.692225 , 7.802500 ),
( 4.607090 , 7.849044 ),
( 4.481324 , 7.879711 ),
( 4.340031 , 8.093378 ),
( 4.181171 , 8.158044 ),
( 4.116415 , 8.200800 ),
( 4.081135 , 8.195278 ),
( 4.090912 , 8.272500 ),
( 4.032232 , 8.378311 ),
( 3.779566 , 8.791278 ),
( 3.769654 , 8.849022 ),
( 3.598177 , 8.955178 ),
( 3.576828 , 9.059633 ),
( 3.527037 , 9.066756 ),
( 3.498069 , 9.082022 ),
( 3.541865 , 9.174211 ),
( 3.542409 , 9.234411 ),
( 3.576275 , 9.262711 ),
( 3.582279 , 9.287744 ),
( 3.390995 , 9.316756 ),
( 3.209606 , 9.344533 ),
( 3.100836 , 9.367511 ),
( 2.957466 , 9.370756 ),
( 2.870844 , 9.366222 ),
( 2.777211 , 9.285222 ),
( 2.744851 , 9.285900 ),
( 2.775397 , 9.294867 ),
( 2.832661 , 9.341156 ),
( 2.868114 , 9.373300 ),
( 2.869502 , 9.400089 ),
( 2.794434 , 9.420178 ),
( 2.714423 , 9.440078 ),
( 2.641124 , 9.441944 ),
( 2.572096 , 9.428378 ),
( 2.548379 , 9.418600 ),
( 2.573130 , 9.388211 ),
( 2.563126 , 9.333567 ),
( 2.535855 , 9.320067 ),
( 2.517670 , 9.282778 ),
( 2.479488 , 9.260278 ),
( 2.483125 , 9.239067 ),
( 2.464034 , 9.224278 ),
( 2.468586 , 9.180556 ),
( 2.443129 , 9.168989 ),
( 2.439084 , 9.147456 ),
( 2.448389 , 9.129344 ),
( 2.444897 , 9.109600 ),
( 2.450720 , 9.097256 ),
( 2.444897 , 9.080389 ),
( 2.447808 , 9.045822 ),
( 2.424536 , 9.024011 ),
( 2.415811 , 9.000133 ),
( 2.442457 , 8.957422 ),
( 2.429887 , 8.946567 ),
( 2.455028 , 8.894556 ),
( 2.435936 , 8.879078 ),
( 2.413136 , 8.853411 ),
( 2.410805 , 8.836944 ),
( 2.412202 , 8.822133 ),
( 2.387533 , 8.789544 ),
( 2.386608 , 8.776044 ),
( 2.398706 , 8.757278 ),
( 2.373103 , 8.739511 ),
( 2.387070 , 8.769467 ),
( 2.375434 , 8.784611 ),
( 2.358674 , 8.785922 ),
( 2.337270 , 8.793167 ),
( 2.365195 , 8.790533 ),
( 2.399169 , 8.821478 ),
( 2.396376 , 8.837933 ),
( 2.408946 , 8.879078 ),
( 2.432218 , 8.894878 ),
( 2.414995 , 8.963022 ),
( 2.390961 , 8.983722 ),
( 2.340091 , 8.969389 ),
( 2.332091 , 8.946244 ),
( 2.340091 , 8.927722 ),
( 2.332091 , 8.912289 ),
( 2.316093 , 8.904067 ),
( 2.311730 , 8.874744 ),
( 2.288975 , 8.861244 ),
( 2.247727 , 8.856233 ),
( 2.233180 , 8.861889 ),
( 2.209436 , 8.859233 ),
( 2.231003 , 8.871144 ),
( 2.265911 , 8.873200 ),
( 2.277548 , 8.869600 ),
( 2.290635 , 8.873711 ),
( 2.299360 , 8.904578 ),
( 2.268088 , 8.909622 ),
( 2.247727 , 8.925256 ),
( 2.225734 , 8.920756 ),
( 2.208747 , 8.909622 ),
( 2.203768 , 8.921811 ),
( 2.214352 , 8.931822 ),
( 2.197138 , 8.933811 ),
( 2.148725 , 8.907478 ),
( 2.134577 , 8.904844 ),
( 2.113354 , 8.917222 ),
( 2.095107 , 8.918800 ),
( 2.079961 , 8.912944 ),
( 2.060761 , 8.913356 ),
( 2.034577 , 8.902656 ),
( 1.983589 , 8.895400 ),
( 2.033997 , 8.913356 ),
( 2.062502 , 8.918700 ),
( 2.092758 , 8.929811 ),
( 2.148090 , 8.928756 ),
( 2.168397 , 8.937878 ),
( 2.146421 , 8.965533 ),
( 2.182173 , 8.943933 ),
( 2.201537 , 8.951311 ),
( 2.239138 , 8.938400 ),
( 2.267063 , 8.944989 ),
( 2.284939 , 8.925767 ),
( 2.306887 , 8.926022 ),
( 2.311086 , 8.936356 ),
( 2.296312 , 8.952489 ),
( 2.317254 , 8.981122 ),
( 2.334939 , 9.003844 ),
( 2.374500 , 9.014044 ),
( 2.386136 , 9.034778 ),
( 2.401962 , 9.044656 ),
( 2.418723 , 9.044889 ),
( 2.426287 , 9.054878 ),
( 2.411739 , 9.063522 ),
( 2.426867 , 9.099311 ),
( 2.398362 , 9.125233 ),
( 2.373339 , 9.121944 ),
( 2.403595 , 9.134289 ),
( 2.417680 , 9.165778 ),
( 2.425860 , 9.192778 ),
( 2.423783 , 9.231400 ),
( 2.400330 , 9.237022 ),
( 2.419494 , 9.243567 ),
( 2.429815 , 9.246711 ),
( 2.449495 , 9.245489 ),
( 2.457676 , 9.289856 ),
( 2.481311 , 9.298211 ),
( 2.488585 , 9.334211 ),
( 2.520255 , 9.353822 ),
( 2.520400 , 9.369944 ),
( 2.494960 , 9.432511 ),
( 2.463671 , 9.469200 ),
( 2.406950 , 9.500578 ),
( 2.240907 , 9.536433 ),
( 2.129969 , 9.569467 ),
( 2.031530 , 9.607422 ),
( 1.932328 , 9.658044 ),
( 1.835167 , 9.695656 ),
( 1.746196 , 9.760744 ),
( 1.667446 , 9.789667 ),
( 1.575400 , 9.797622 ),
( 1.562104 , 9.828722 ),
( 1.531422 , 9.846800 ),
( 1.415859 , 9.888744 ),
( 1.315206 , 9.942167 ),
( 1.175573 , 10.083667 ),
( 1.147394 , 10.090267 ),
( 1.118064 , 10.086567 ),
( 0.990883 , 9.998400 ),
( 0.778930 , 9.990856 ),
( 0.592924 , 10.033144 ),
( 0.507490 , 10.125422 ),
( 0.419562 , 10.320811 ),
( 0.375403 , 10.344533 ),
( 0.276464 , 10.431189 ),
( 0.220170 , 10.534911 ),
( 0.181271 , 10.571000 ),
( 0.153745 , 10.620156 ),
( 0.114973 , 10.653889 ),
( 0.103274 , 10.707756 ),
( 0.097914 , 10.761511 ),
( 0.076256 , 10.811522 ),
( 0.061935 , 10.867833 ),
( 0.000000 , 10.960167 )
]
x, y = to_xy(coast)
cst = ramerdouglas(coast, 0.1)
xx, yy = to_xy(cst)
print(xx, yy)
plt.plot(x, y, "bo", xx, yy, "r^")
plt.show()
| mit |
rlzijdeman/nlgis2-1 | maps/bin/viewer.py | 4 | 2527 |
# coding: utf-8
# In[1]:
#!/usr/bin/python
import urllib2
import simplejson
import json
import sys
from shapely.geometry import shape, Polygon, MultiPolygon
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
# Example of polygon
co1 = {"type": "Polygon", "coordinates": [
[(-102.05, 41.0),
(-102.05, 37.0),
(-109.05, 37.0),
(-109.05, 41.0)]]}
varyear = None
varcode = None
if len(sys.argv) > 1:
varcode = sys.argv[1]
if len(sys.argv) > 2:
varyear = sys.argv[2]
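# Hedged note, not part of the original script: judging from the argument
# parsing above, it is presumably invoked as e.g.
#   python viewer.py 10426 1997
# with the Amsterdam code as the first argument and an optional year as the
# second; both fall back to the defaults below when omitted.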
# In[5]:
# Defaults (used when no command-line arguments are given)
debug = 0
if varcode is None:
varcode = 10426
if varyear is None:
varyear = 1997
varname = "Amsterdam"
apiurl = "http://node-128.dev.socialhistoryservices.org/api/maps"
def getmap(apiurl, code, year, cityname):
amscode = str(code)
if cityname:
amscode = ''
jsondataurl = apiurl + "?year=" + str(year) + "&format=geojson"
req = urllib2.Request(jsondataurl)
opener = urllib2.build_opener()
f = opener.open(req)
datapolygons = simplejson.load(f)
def coordinates(polygons, amscode, cityname):
for key in polygons:
if key == 'features':
data = polygons[key]
for key in data:
response = json.dumps(key)
dict = json.loads(response)
for key in dict:
if key == 'properties':
maincode = str(dict[key]['amsterdamcode'])
mainname = dict[key]['name']
if maincode == amscode:
co = dict['geometry']['coordinates']
if mainname.encode('utf-8') == cityname:
co = dict['geometry']['coordinates']
return co
coords = coordinates(datapolygons, amscode, cityname)
x = [i for i,j in coords[0][0]]
y = [j for i,j in coords[0][0]]
return (x,y)
colors = ['red', 'green', 'orange', 'brown', 'purple']
(x,y) = getmap(apiurl, varcode, varyear, varname)
fig = plt.figure()
ax = fig.gca()
ax.plot(x,y)
ax.axis('scaled')
fig.savefig('myplot.png')
plt.show()
#from pyproj import Proj
#pa = Proj("+proj=aea +lat_1=37.0 +lat_2=41.0 +lat_0=39.0 +lon_0=-106.55")
#lon, lat = zip(x[0],y[0])
cop = {"type": "Polygon", "coordinates": [zip(x, y)]}
#x, y = pa(lon, lat)
debug = 1
if debug:
print cop
#shape = shape(cop)
#print shape.type
#print shape.area
# In[ ]:
| gpl-3.0 |
wzbozon/statsmodels | statsmodels/stats/anova.py | 25 | 13433 | from statsmodels.compat.python import lrange, lmap
import numpy as np
from scipy import stats
from pandas import DataFrame, Index
from statsmodels.formula.formulatools import (_remove_intercept_patsy,
_has_intercept, _intercept_idx)
def _get_covariance(model, robust):
if robust is None:
return model.cov_params()
elif robust == "hc0":
se = model.HC0_se
return model.cov_HC0
elif robust == "hc1":
se = model.HC1_se
return model.cov_HC1
elif robust == "hc2":
se = model.HC2_se
return model.cov_HC2
elif robust == "hc3":
se = model.HC3_se
return model.cov_HC3
else: # pragma: no cover
raise ValueError("robust options %s not understood" % robust)
#NOTE: these need to take into account weights !
def anova_single(model, **kwargs):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
typ : int or str {1,2,3} or {"I","II","III"}
Type of sum of squares to use.
**kwargs**
scale : float
Estimate of variance. If None, it will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
typ = kwargs.get("typ", 1)
robust = kwargs.get("robust", None)
if robust:
robust = robust.lower()
endog = model.model.endog
exog = model.model.exog
nobs = exog.shape[0]
response_name = model.model.endog_names
design_info = model.model.data.design_info
exog_names = model.model.exog_names
# +1 for resids
n_rows = (len(design_info.terms) - _has_intercept(design_info) + 1)
pr_test = "PR(>%s)" % test
names = ['df', 'sum_sq', 'mean_sq', test, pr_test]
table = DataFrame(np.zeros((n_rows, 5)), columns = names)
if typ in [1,"I"]:
return anova1_lm_single(model, endog, exog, nobs, design_info, table,
n_rows, test, pr_test, robust)
elif typ in [2, "II"]:
return anova2_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [3, "III"]:
return anova3_lm_single(model, design_info, n_rows, test, pr_test,
robust)
elif typ in [4, "IV"]:
raise NotImplementedError("Type IV not yet implemented")
else: # pragma: no cover
raise ValueError("Type %s not understood" % str(typ))
def anova1_lm_single(model, endog, exog, nobs, design_info, table, n_rows, test,
pr_test, robust):
"""
ANOVA table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance. If None, it will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
"""
#maybe we should rethink using pinv > qr in OLS/linear models?
effects = getattr(model, 'effects', None)
if effects is None:
q,r = np.linalg.qr(exog)
effects = np.dot(q.T, endog)
arr = np.zeros((len(design_info.terms), len(design_info.column_names)))
slices = [design_info.slice(name) for name in design_info.term_names]
for i,slice_ in enumerate(slices):
arr[i, slice_] = 1
sum_sq = np.dot(arr, effects**2)
#NOTE: assumes intercept is first column
idx = _intercept_idx(design_info)
sum_sq = sum_sq[~idx]
term_names = np.array(design_info.term_names) # want boolean indexing
term_names = term_names[~idx]
index = term_names.tolist()
table.index = Index(index + ['Residual'])
table.ix[index, ['df', 'sum_sq']] = np.c_[arr[~idx].sum(1), sum_sq]
if test == 'F':
table.ix[:n_rows, test] = ((table['sum_sq']/table['df'])/
(model.ssr/model.df_resid))
table.ix[:n_rows, pr_test] = stats.f.sf(table["F"], table["df"],
model.df_resid)
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
table['mean_sq'] = table['sum_sq'] / table['df']
return table
#NOTE: the below is not agnostic about formula...
def anova2_lm_single(model, design_info, n_rows, test, pr_test, robust):
"""
ANOVA type II table for one fitted linear model.
Parameters
----------
model : fitted linear model results instance
A fitted linear model
**kwargs**
scale : float
Estimate of variance. If None, it will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
Notes
-----
Use of this function is discouraged. Use anova_lm instead.
Type II
Sum of Squares compares marginal contribution of terms. Thus, it is
not particularly useful for models with significant interaction terms.
"""
terms_info = design_info.terms[:] # copy
terms_info = _remove_intercept_patsy(terms_info)
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, None)
robust_cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab all variables except interaction effects that contain term
# need two hypotheses matrices L1 is most restrictive, ie., term==0
# L2 is everything except term==0
cols = design_info.slice(term)
L1 = lrange(cols.start, cols.stop)
L2 = []
term_set = set(term.factors)
for t in terms_info: # for the term you have
other_set = set(t.factors)
if term_set.issubset(other_set) and not term_set == other_set:
col = design_info.slice(t)
# on a higher order term containing current `term`
L1.extend(lrange(col.start, col.stop))
L2.extend(lrange(col.start, col.stop))
L1 = np.eye(model.model.exog.shape[1])[L1]
L2 = np.eye(model.model.exog.shape[1])[L2]
if L2.size:
LVL = np.dot(np.dot(L1,robust_cov),L2.T)
from scipy import linalg
orth_compl,_ = linalg.qr(LVL)
r = L1.shape[0] - L2.shape[0]
# L1|2
# use the non-unique orthogonal completion since L12 is rank r
L12 = np.dot(orth_compl[:,-r:].T, L1)
else:
L12 = L1
r = L1.shape[0]
#from IPython.core.debugger import Pdb; Pdb().set_trace()
if test == 'F':
f = model.f_test(L12, cov_p=robust_cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
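# Hedged illustration, not part of the original module: a Type II table with
# a heteroscedasticity-consistent covariance can be requested through the
# public wrapper defined below, e.g.
#   anova_lm(ols('y ~ a * b', data=df).fit(), typ=2, robust='hc3')
# where `df`, `y`, `a` and `b` are placeholder names.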
def anova3_lm_single(model, design_info, n_rows, test, pr_test, robust):
n_rows += _has_intercept(design_info)
terms_info = design_info.terms
names = ['sum_sq', 'df', test, pr_test]
table = DataFrame(np.zeros((n_rows, 4)), columns = names)
cov = _get_covariance(model, robust)
col_order = []
index = []
for i, term in enumerate(terms_info):
# grab term, hypothesis is that term == 0
cols = design_info.slice(term)
L1 = np.eye(model.model.exog.shape[1])[cols]
L12 = L1
r = L1.shape[0]
if test == 'F':
f = model.f_test(L12, cov_p=cov)
table.ix[i, test] = test_value = f.fvalue
table.ix[i, pr_test] = f.pvalue
# need to back out SSR from f_test
table.ix[i, 'df'] = r
#col_order.append(cols.start)
index.append(term.name())
table.index = Index(index + ['Residual'])
#NOTE: Don't need to sort because terms are an ordered dict now
#table = table.ix[np.argsort(col_order + [model.model.exog.shape[1]+1])]
# back out sum of squares from f_test
ssr = table[test] * table['df'] * model.ssr/model.df_resid
table['sum_sq'] = ssr
# fill in residual
table.ix['Residual', ['sum_sq','df', test, pr_test]] = (model.ssr,
model.df_resid,
np.nan, np.nan)
return table
def anova_lm(*args, **kwargs):
"""
ANOVA table for one or more fitted linear models.
Parameters
----------
args : fitted linear model results instance
One or more fitted linear models
scale : float
Estimate of variance. If None, it will be estimated from the largest
model. Default is None.
test : str {"F", "Chisq", "Cp"} or None
Test statistics to provide. Default is "F".
typ : str or int {"I","II","III"} or {1,2,3}
The type of ANOVA test to perform. See notes.
robust : {None, "hc0", "hc1", "hc2", "hc3"}
Use heteroscedasticity-corrected coefficient covariance matrix.
If robust covariance is desired, it is recommended to use `hc3`.
Returns
-------
anova : DataFrame
A DataFrame containing the ANOVA table.
Notes
-----
Model statistics are given in the order of args. Models must have
been fit using the formula api.
See Also
--------
model_results.compare_f_test, model_results.compare_lm_test
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.formula.api import ols
>>> moore = sm.datasets.get_rdataset("Moore", "car",
... cache=True) # load data
>>> data = moore.data
>>> data = data.rename(columns={"partner.status" :
... "partner_status"}) # make name pythonic
>>> moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
... data=data).fit()
>>> table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame
>>> print table
"""
typ = kwargs.get('typ', 1)
### Farm Out Single model ANOVA Type I, II, III, and IV ###
if len(args) == 1:
model = args[0]
return anova_single(model, **kwargs)
try:
assert typ in [1,"I"]
except:
raise ValueError("Multiple models only supported for type I. "
"Got type %s" % str(typ))
### COMPUTE ANOVA TYPE I ###
# if given a single model
if len(args) == 1:
return anova_single(*args, **kwargs)
# received multiple fitted models
test = kwargs.get("test", "F")
scale = kwargs.get("scale", None)
n_models = len(args)
model_formula = []
pr_test = "Pr(>%s)" % test
names = ['df_resid', 'ssr', 'df_diff', 'ss_diff', test, pr_test]
table = DataFrame(np.zeros((n_models, 6)), columns = names)
if not scale: # assume biggest model is last
scale = args[-1].scale
table["ssr"] = lmap(getattr, args, ["ssr"]*n_models)
table["df_resid"] = lmap(getattr, args, ["df_resid"]*n_models)
table.ix[1:, "df_diff"] = -np.diff(table["df_resid"].values)
table["ss_diff"] = -table["ssr"].diff()
if test == "F":
table["F"] = table["ss_diff"] / table["df_diff"] / scale
table[pr_test] = stats.f.sf(table["F"], table["df_diff"],
table["df_resid"])
# for earlier scipy - stats.f.sf(np.nan, 10, 2) -> 0 not nan
table[pr_test][table['F'].isnull()] = np.nan
return table
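# Hedged illustration, not part of the original module: passing several
# nested fitted models runs the sequential (Type I) comparison above, e.g.
#   anova_lm(ols('y ~ x1', data=df).fit(), ols('y ~ x1 + x2', data=df).fit())
# returns one row per model with df_resid, ssr, df_diff, ss_diff, F and
# Pr(>F) columns (`df`, `y`, `x1`, `x2` are placeholder names).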
if __name__ == "__main__":
import pandas
from statsmodels.formula.api import ols
# in R
#library(car)
#write.csv(Moore, "moore.csv", row.names=FALSE)
moore = pandas.read_table('moore.csv', delimiter=",", skiprows=1,
names=['partner_status','conformity',
'fcategory','fscore'])
moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
data=moore).fit()
mooreB = ols('conformity ~ C(partner_status, Sum)', data=moore).fit()
# for each term you just want to test vs the model without its
# higher-order terms
# using Monette-Fox slides and Marden class notes for linear algebra /
# orthogonal complement
# https://netfiles.uiuc.edu/jimarden/www/Classes/STAT324/
table = anova_lm(moore_lm, typ=2)
| bsd-3-clause |
mdtraj/mdtraj | mdtraj/formats/pdb/pdbfile.py | 1 | 30853 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Carlos Hernandez, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# Portions copyright (c) 2012 Stanford University and the Authors.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit. Those portions are Copyright 2008-2012 Stanford University
# and Peter Eastman, and distributed under the following license:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
from __future__ import print_function, division
import os
from datetime import date
import gzip
import numpy as np
import xml.etree.ElementTree as etree
from copy import copy, deepcopy
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.core.topology import Topology
from mdtraj.utils import ilen, cast_indices, in_units_of, open_maybe_zipped
from mdtraj.formats.registry import FormatRegistry
from mdtraj.core import element as elem
from mdtraj.utils import six
from mdtraj import version
import warnings
if six.PY3:
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import (uses_relative, uses_netloc, uses_params)
else:
from urllib2 import urlopen
from urlparse import urlparse
from urlparse import uses_relative, uses_netloc, uses_params
# Ugly hack -- we don't always issue UserWarning in Py2, but we need to in
# this module
warnings.filterwarnings('always', category=UserWarning, module=__name__)
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
__all__ = ['load_pdb', 'PDBTrajectoryFile']
##############################################################################
# Code
##############################################################################
def _is_url(url):
"""Check to see if a URL has a valid protocol.
from pandas/io.common.py Copyright 2014 Pandas Developers
Used under the BSD licence
"""
try:
return urlparse(url).scheme in _VALID_URLS
except (AttributeError, TypeError):
return False
@FormatRegistry.register_loader('.pdb')
@FormatRegistry.register_loader('.pdb.gz')
def load_pdb(filename, stride=None, atom_indices=None, frame=None,
no_boxchk=False, standard_names=True, top=None):
"""Load a RCSB Protein Data Bank file from disk.
Parameters
----------
filename : str
Path to the PDB file on disk. The string could be a URL. Valid URL
schemes include http and ftp.
stride : int, default=None
Only read every stride-th model from the file
atom_indices : array_like, default=None
If not None, then read only a subset of the atoms coordinates from the
file. These indices are zero-based (not 1 based, as used by the PDB
format). So if you want to load only the first atom in the file, you
would supply ``atom_indices = np.array([0])``.
frame : int, default=None
Use this option to load only a single frame from a trajectory on disk.
If frame is None, the default, the entire trajectory will be loaded.
If supplied, ``stride`` will be ignored.
no_boxchk : bool, default=False
By default, a heuristic check based on the particle density will be
performed to determine if the unit cell dimensions are absurd. If the
particle density is >1000 atoms per nm^3, the unit cell will be
discarded. This is done because all PDB files from RCSB contain a CRYST1
record, even if there are no periodic boundaries, and dummy values are
filled in instead. This check will filter out those false unit cells and
avoid potential errors in geometry calculations. Set this variable to
``True`` in order to skip this heuristic check.
standard_names : bool, default=True
If True, non-standard atomnames and residuenames are standardized to conform
with the current PDB format version. If set to false, this step is skipped.
top : mdtraj.core.Topology, default=None
If you give a topology as input, the topology won't be parsed from the
PDB file. This saves time if you have to parse a large number of files.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object.
Examples
--------
>>> import mdtraj as md
>>> pdb = md.load_pdb('2EQQ.pdb')
>>> print(pdb)
<mdtraj.Trajectory with 20 frames, 423 atoms at 0x110740a90>
See Also
--------
mdtraj.PDBTrajectoryFile : Low level interface to PDB files
"""
from mdtraj import Trajectory
if not isinstance(filename, six.string_types):
raise TypeError('filename must be of type string for load_pdb. '
'you supplied %s' % type(filename))
atom_indices = cast_indices(atom_indices)
filename = str(filename)
with PDBTrajectoryFile(filename, standard_names=standard_names, top=top) as f:
atom_slice = slice(None) if atom_indices is None else atom_indices
if frame is not None:
coords = f.positions[[frame], atom_slice, :]
else:
coords = f.positions[::stride, atom_slice, :]
assert coords.ndim == 3, 'internal shape error'
n_frames = len(coords)
topology = f.topology
if atom_indices is not None:
# The input topology shouldn't be modified because
# subset makes a copy inside the function
topology = topology.subset(atom_indices)
if f.unitcell_angles is not None and f.unitcell_lengths is not None:
unitcell_lengths = np.array([f.unitcell_lengths] * n_frames)
unitcell_angles = np.array([f.unitcell_angles] * n_frames)
else:
unitcell_lengths = None
unitcell_angles = None
in_units_of(coords, f.distance_unit, Trajectory._distance_unit, inplace=True)
in_units_of(unitcell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)
time = np.arange(len(coords))
if frame is not None:
time *= frame
elif stride is not None:
time *= stride
traj = Trajectory(xyz=coords, time=time, topology=topology,
unitcell_lengths=unitcell_lengths,
unitcell_angles=unitcell_angles)
if not no_boxchk and traj.unitcell_lengths is not None:
# Only one CRYST1 record is allowed, so only do this check for the first
# frame. Some RCSB PDB files do not *really* have a unit cell, but still
# have a CRYST1 record with a dummy definition. These boxes are usually
# tiny (e.g., 1 A^3), so check that the particle density in the unit
# cell is not absurdly high. Standard water density is ~55 M, which
# yields a particle density ~100 atoms per cubic nm. It should be safe
# to say that no particle density should exceed 10x that.
particle_density = traj.top.n_atoms / traj.unitcell_volumes[0]
if particle_density > 1000:
warnings.warn('Unlikely unit cell vectors detected in PDB file likely '
'resulting from a dummy CRYST1 record. Discarding unit '
'cell vectors.', category=UserWarning)
traj._unitcell_lengths = traj._unitcell_angles = None
return traj
@FormatRegistry.register_fileobject('.pdb')
@FormatRegistry.register_fileobject('.pdb.gz')
class PDBTrajectoryFile(object):
"""Interface for reading and writing Protein Data Bank (PDB) files
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
standard_names : bool, default=True
If True, non-standard atomnames and residuenames are standardized to conform
with the current PDB format version. If set to false, this step is skipped.
top : mdtraj.core.Topology, default=None
If you give a topology as input, the topology won't be parsed from the
PDB file. This saves time if you have to parse a large number of files.
Attributes
----------
positions : np.ndarray, shape=(n_frames, n_atoms, 3)
topology : mdtraj.Topology
closed : bool
Notes
-----
When writing pdb files, mdtraj follows the PDB3.0 standard as closely as
possible. During *reading* however, we try to be more lenient. For instance,
we will parse common nonstandard atom names during reading, and convert them
into the standard names. The replacement table used by mdtraj is at
{mdtraj_source}/formats/pdb/data/pdbNames.xml.
See Also
--------
mdtraj.load_pdb : High-level wrapper that returns a ``md.Trajectory``
"""
distance_unit = 'angstroms'
_residueNameReplacements = {}
_atomNameReplacements = {}
_chain_names = [chr(ord('A') + i) for i in range(26)]
def __init__(self, filename, mode='r', force_overwrite=True, standard_names=True, top=None):
self._open = False
self._file = None
self._topology = top
self._positions = None
self._mode = mode
self._last_topology = None
self._standard_names = standard_names
if mode == 'r':
PDBTrajectoryFile._loadNameReplacementTables()
if _is_url(filename):
self._file = urlopen(filename)
if filename.lower().endswith('.gz'):
if six.PY3:
self._file = gzip.GzipFile(fileobj=self._file)
else:
self._file = gzip.GzipFile(fileobj=six.StringIO(
self._file.read()))
if six.PY3:
self._file = six.StringIO(self._file.read().decode('utf-8'))
else:
self._file = open_maybe_zipped(filename, 'r')
self._read_models()
elif mode == 'w':
self._header_written = False
self._footer_written = False
self._file = open_maybe_zipped(filename, 'w', force_overwrite)
else:
raise ValueError("invalid mode: %s" % mode)
self._open = True
def write(self, positions, topology, modelIndex=None, unitcell_lengths=None,
unitcell_angles=None, bfactors=None):
"""Write a PDB file to disk
Parameters
----------
positions : array_like
The list of atomic positions to write.
topology : mdtraj.Topology
The Topology defining the model to write.
modelIndex : {int, None}
If not None, the model will be surrounded by MODEL/ENDMDL records
with this index
unitcell_lengths : {tuple, None}
Lengths of the three unit cell vectors, or None for a non-periodic system
unitcell_angles : {tuple, None}
Angles between the three unit cell vectors, or None for a non-periodic system
bfactors : array_like, default=None, shape=(n_atoms,)
Save bfactors with pdb file. Should contain a single number for
each atom in the topology
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if not self._header_written:
self._write_header(unitcell_lengths, unitcell_angles)
self._header_written = True
if ilen(topology.atoms) != len(positions):
raise ValueError('The number of positions must match the number of atoms')
if np.any(np.isnan(positions)):
raise ValueError('Particle position is NaN')
if np.any(np.isinf(positions)):
raise ValueError('Particle position is infinite')
self._last_topology = topology # Hack to save the topology of the last frame written, allows us to output CONECT entries in write_footer()
if bfactors is None:
bfactors = ['{0:5.2f}'.format(0.0)] * len(positions)
else:
if (np.max(bfactors) >= 100) or (np.min(bfactors) <= -10):
raise ValueError("bfactors must be in (-10, 100)")
bfactors = ['{0:5.2f}'.format(b) for b in bfactors]
atomIndex = 1
posIndex = 0
if modelIndex is not None:
print("MODEL %4d" % modelIndex, file=self._file)
for (chainIndex, chain) in enumerate(topology.chains):
chainName = self._chain_names[chainIndex % len(self._chain_names)]
residues = list(chain.residues)
for (resIndex, res) in enumerate(residues):
if len(res.name) > 3:
resName = res.name[:3]
else:
resName = res.name
for atom in res.atoms:
if len(atom.name) < 4 and atom.name[:1].isalpha() and (atom.element is None or len(atom.element.symbol) < 2):
atomName = ' '+atom.name
elif len(atom.name) > 4:
atomName = atom.name[:4]
else:
atomName = atom.name
coords = positions[posIndex]
if atom.element is not None:
symbol = atom.element.symbol
else:
symbol = ' '
if atom.serial is not None and len(topology._chains) < 2:
# We can't do this for more than 1 chain
# to prevent issue 1611
atomSerial = atom.serial
else:
atomSerial = atomIndex
line = "ATOM %5d %-4s %3s %1s%4d %s%s%s 1.00 %5s %-4s%2s " % ( # Right-justify atom symbol
atomSerial % 100000, atomName, resName, chainName,
(res.resSeq) % 10000, _format_83(coords[0]),
_format_83(coords[1]), _format_83(coords[2]),
bfactors[posIndex], atom.segment_id[:4], symbol[-2:])
assert len(line) == 80, 'Fixed width overflow detected'
print(line, file=self._file)
posIndex += 1
atomIndex += 1
if resIndex == len(residues)-1:
print("TER %5d %3s %s%4d" % (atomSerial+1, resName, chainName, res.resSeq), file=self._file)
atomIndex += 1
if modelIndex is not None:
print("ENDMDL", file=self._file)
def _write_header(self, unitcell_lengths, unitcell_angles, write_metadata=True):
"""Write out the header for a PDB file.
Parameters
----------
unitcell_lengths : {tuple, None}
The lengths of the three unitcell vectors, ``a``, ``b``, ``c``
unitcell_angles : {tuple, None}
The angles between the three unitcell vectors, ``alpha``,
``beta``, ``gamma``
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if unitcell_lengths is None and unitcell_angles is None:
return
if unitcell_lengths is not None and unitcell_angles is not None:
if not len(unitcell_lengths) == 3:
raise ValueError('unitcell_lengths must be length 3')
if not len(unitcell_angles) == 3:
raise ValueError('unitcell_angles must be length 3')
else:
raise ValueError('either unitcell_lengths and unitcell_angles '
'should both be specified, or neither')
box = list(unitcell_lengths) + list(unitcell_angles)
assert len(box) == 6
if write_metadata:
print("REMARK 1 CREATED WITH MDTraj %s, %s" % (version.version, str(date.today())), file=self._file)
print("CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1 " % tuple(box), file=self._file)
def _write_footer(self):
if not self._mode == 'w':
raise ValueError('file not opened for writing')
# Identify bonds that should be listed as CONECT records.
standardResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS', 'LEU', 'MET', 'PRO', 'THR', 'TYR',
'ARG', 'ASP', 'GLN', 'GLY', 'ILE', 'LYS', 'PHE', 'SER', 'TRP', 'VAL',
'A', 'G', 'C', 'U', 'I', 'DA', 'DG', 'DC', 'DT', 'DI', 'HOH']
conectBonds = []
if self._last_topology is not None:
for atom1, atom2 in self._last_topology.bonds:
if atom1.residue.name not in standardResidues or atom2.residue.name not in standardResidues:
conectBonds.append((atom1, atom2))
elif atom1.name == 'SG' and atom2.name == 'SG' and atom1.residue.name == 'CYS' and atom2.residue.name == 'CYS':
conectBonds.append((atom1, atom2))
if len(conectBonds) > 0:
# Work out the index used in the PDB file for each atom.
atomIndex = {}
nextAtomIndex = 0
prevChain = None
for chain in self._last_topology.chains:
for atom in chain.atoms:
if atom.residue.chain != prevChain:
nextAtomIndex += 1
prevChain = atom.residue.chain
atomIndex[atom] = nextAtomIndex
nextAtomIndex += 1
# Record which other atoms each atom is bonded to.
atomBonds = {}
for atom1, atom2 in conectBonds:
index1 = atomIndex[atom1]
index2 = atomIndex[atom2]
if index1 not in atomBonds:
atomBonds[index1] = []
if index2 not in atomBonds:
atomBonds[index2] = []
atomBonds[index1].append(index2)
atomBonds[index2].append(index1)
# Write the CONECT records.
for index1 in sorted(atomBonds):
bonded = atomBonds[index1]
while len(bonded) > 4:
print("CONECT%5d%5d%5d%5d" % (index1, bonded[0], bonded[1], bonded[2]), file=self._file)
del bonded[:4]
line = "CONECT%5d" % index1
for index2 in bonded:
line = "%s%5d" % (line, index2)
print(line, file=self._file)
print("END", file=self._file)
self._footer_written = True
@classmethod
def set_chain_names(cls, values):
"""Set the cycle of chain names used when writing PDB files
When writing PDB files, PDBTrajectoryFile translates each chain's
index into a name -- the name is what's written in the file. By
default, chains are named with the letters A-Z.
Parameters
----------
values : list
A list of chacters (strings of length 1) that the PDB writer will
cycle through to choose chain names.
"""
for item in values:
if not isinstance(item, six.string_types) or len(item) != 1:
raise TypeError('Names must be a single character string')
cls._chain_names = values
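# Hedged illustration, not part of the original source: e.g.
#   PDBTrajectoryFile.set_chain_names(['A', 'B', 'C'])
# makes the writer cycle through only these three chain labels when
# translating chain indices into names.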
@property
def positions(self):
"""The cartesian coordinates of all of the atoms in each frame. Available when a file is opened in mode='r'
"""
return self._positions
@property
def topology(self):
"""The topology from this PDB file. Available when a file is opened in mode='r'
"""
return self._topology
@property
def unitcell_lengths(self):
"The unitcell lengths (3-tuple) in this PDB file. May be None"
return self._unitcell_lengths
@property
def unitcell_angles(self):
"The unitcell angles (3-tuple) in this PDB file. May be None"
return self._unitcell_angles
@property
def closed(self):
"Whether the file is closed"
return not self._open
def close(self):
"Close the PDB file"
if self._mode == 'w' and not self._footer_written:
self._write_footer()
if self._open:
self._file.close()
self._open = False
def _read_models(self):
if not self._mode == 'r':
raise ValueError('file not opened for reading')
pdb = PdbStructure(self._file, load_all_models=True)
# load all of the positions (from every model)
_positions = []
for model in pdb.iter_models(use_all_models=True):
coords = []
for chain in model.iter_chains():
for residue in chain.iter_residues():
for atom in residue.atoms:
coords.append(atom.get_position())
_positions.append(coords)
if not all(len(f) == len(_positions[0]) for f in _positions):
raise ValueError('PDB Error: All MODELs must contain the same number of ATOMs')
self._positions = np.array(_positions)
## The atom positions read from the PDB file
self._unitcell_lengths = pdb.get_unit_cell_lengths()
self._unitcell_angles = pdb.get_unit_cell_angles()
# Load the topology if None is given
if self._topology is None:
self._topology = Topology()
atomByNumber = {}
for chain in pdb.iter_chains():
c = self._topology.add_chain()
for residue in chain.iter_residues():
resName = residue.get_name()
if resName in PDBTrajectoryFile._residueNameReplacements and self._standard_names:
resName = PDBTrajectoryFile._residueNameReplacements[resName]
r = self._topology.add_residue(resName, c, residue.number, residue.segment_id)
if resName in PDBTrajectoryFile._atomNameReplacements and self._standard_names:
atomReplacements = PDBTrajectoryFile._atomNameReplacements[resName]
else:
atomReplacements = {}
for atom in residue.atoms:
atomName = atom.get_name()
if atomName in atomReplacements:
atomName = atomReplacements[atomName]
atomName = atomName.strip()
element = atom.element
if element is None:
element = PDBTrajectoryFile._guess_element(atomName, residue.name, len(residue))
newAtom = self._topology.add_atom(atomName, element, r, serial=atom.serial_number)
atomByNumber[atom.serial_number] = newAtom
self._topology.create_standard_bonds()
self._topology.create_disulfide_bonds(self.positions[0])
# Add bonds based on CONECT records.
connectBonds = []
for connect in pdb.models[-1].connects:
i = connect[0]
for j in connect[1:]:
if i in atomByNumber and j in atomByNumber:
connectBonds.append((atomByNumber[i], atomByNumber[j]))
if len(connectBonds) > 0:
# Only add bonds that don't already exist.
existingBonds = set(self._topology.bonds)
for bond in connectBonds:
if bond not in existingBonds and (bond[1], bond[0]) not in existingBonds:
self._topology.add_bond(bond[0], bond[1])
existingBonds.add(bond)
@staticmethod
def _loadNameReplacementTables():
"""Load the list of atom and residue name replacements."""
if len(PDBTrajectoryFile._residueNameReplacements) == 0:
tree = etree.parse(os.path.join(os.path.dirname(__file__), 'data', 'pdbNames.xml'))
allResidues = {}
proteinResidues = {}
nucleicAcidResidues = {}
for residue in tree.getroot().findall('Residue'):
name = residue.attrib['name']
if name == 'All':
PDBTrajectoryFile._parseResidueAtoms(residue, allResidues)
elif name == 'Protein':
PDBTrajectoryFile._parseResidueAtoms(residue, proteinResidues)
elif name == 'Nucleic':
PDBTrajectoryFile._parseResidueAtoms(residue, nucleicAcidResidues)
for atom in allResidues:
proteinResidues[atom] = allResidues[atom]
nucleicAcidResidues[atom] = allResidues[atom]
for residue in tree.getroot().findall('Residue'):
name = residue.attrib['name']
for id in residue.attrib:
if id == 'name' or id.startswith('alt'):
PDBTrajectoryFile._residueNameReplacements[residue.attrib[id]] = name
if 'type' not in residue.attrib:
atoms = copy(allResidues)
elif residue.attrib['type'] == 'Protein':
atoms = copy(proteinResidues)
elif residue.attrib['type'] == 'Nucleic':
atoms = copy(nucleicAcidResidues)
else:
atoms = copy(allResidues)
PDBTrajectoryFile._parseResidueAtoms(residue, atoms)
PDBTrajectoryFile._atomNameReplacements[name] = atoms
@staticmethod
def _guess_element(atom_name, residue_name, residue_length):
"Try to guess the element name"
upper = atom_name.upper()
if upper.startswith('CL'):
element = elem.chlorine
elif upper.startswith('NA'):
element = elem.sodium
elif upper.startswith('MG'):
element = elem.magnesium
elif upper.startswith('BE'):
element = elem.beryllium
elif upper.startswith('LI'):
element = elem.lithium
elif upper.startswith('K'):
element = elem.potassium
elif upper.startswith('ZN'):
element = elem.zinc
elif residue_length == 1 and upper.startswith('CA'):
element = elem.calcium
# TJL has edited this. There are a few issues here. First,
# parsing for the element is non-trivial, so I do my best
# below. Second, there is additional parsing code in
# pdbstructure.py, and I am unsure why it doesn't get used
# here...
elif residue_length > 1 and upper.startswith('CE'):
element = elem.carbon # (probably) not Celenium...
elif residue_length > 1 and upper.startswith('CD'):
element = elem.carbon # (probably) not Cadmium...
elif residue_name in ['TRP', 'ARG', 'GLN', 'HIS'] and upper.startswith('NE'):
element = elem.nitrogen # (probably) not Neon...
elif residue_name in ['ASN'] and upper.startswith('ND'):
element = elem.nitrogen # (probably) not ND...
elif residue_name == 'CYS' and upper.startswith('SG'):
element = elem.sulfur # (probably) not SG...
else:
try:
element = elem.get_by_symbol(atom_name[0])
except KeyError:
try:
symbol = atom_name[0:2].strip().rstrip("AB0123456789").lstrip("0123456789")
element = elem.get_by_symbol(symbol)
except KeyError:
element = None
return element
@staticmethod
def _parseResidueAtoms(residue, map):
for atom in residue.findall('Atom'):
name = atom.attrib['name']
for id in atom.attrib:
map[atom.attrib[id]] = name
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __len__(self):
"Number of frames in the file"
if str(self._mode) != 'r':
raise NotImplementedError('len() only available in mode="r" currently')
if not self._open:
raise ValueError('I/O operation on closed file')
return len(self._positions)
def _format_83(f):
"""Format a single float into a string of width 8, with ideally 3 decimal
places of precision. If the number is a little too large, we can
gracefully degrade the precision by lopping off some of the decimal
places. If it's much too large, we throw a ValueError"""
if -999.999 < f < 9999.999:
return '%8.3f' % f
if -9999999 < f < 99999999:
return ('%8.3f' % f)[:8]
raise ValueError('coordinate "%s" could not be represented '
'in a width-8 field' % f)
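# Hedged sanity check, not part of the original module: _format_83 keeps
# three decimals for typical coordinates and drops precision for large ones,
# e.g.
#   _format_83(1.23456)     # -> '   1.235'
#   _format_83(123456.789)  # -> '123456.7' (decimals lopped off to fit 8 chars)
#   _format_83(1e9)         # raises ValueError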
| lgpl-2.1 |
fivetentaylor/pytextools | setup.py | 1 | 1042 | from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='pytextools',
version='0.1',
description='Family of command line text processing tools that builds upon the gnu core utils',
url='https://github.com/fivetentaylor/pytextools',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Text Processing :: Linguistic',
],
scripts=[
'pytextools/bin/describe',
'pytextools/bin/jscut',
'pytextools/bin/jslist2csv',
'pytextools/bin/jsprint',
'pytextools/bin/rm_quotes',
'pytextools/bin/scatter',
'pytextools/bin/tdate',
'pytextools/bin/tuniq',
'pytextools/bin/widen',
],
author='Taylor Sather',
author_email='[email protected]',
license='MIT',
packages=['pytextools'],
install_requires=[
'pandas','ipdb',
'matplotlib','numpy',
],
dependency_links=[
'https://github.com/fivetentaylor/pytextools.git'
],
zip_safe=False)
| mit |
rpoleski/MulensModel | source/MulensModel/mulensdata.py | 1 | 26796 | import numpy as np
import matplotlib.pyplot as plt
from os.path import basename, exists
from astropy.coordinates import SkyCoord
from astropy import units as u
from MulensModel.utils import Utils
from MulensModel.satelliteskycoord import SatelliteSkyCoord
from MulensModel.coordinates import Coordinates
class MulensData(object):
"""
A set of photometric measurements for a microlensing event.
Examples of how to define a MulensData object:
data = MulensData(file_name=SAMPLE_FILE_01)
data = MulensData(data_list=[[Dates], [Magnitudes], [Errors]])
**Parallax calculations assume that the dates supplied are
BJD_TDB. See** :py:class:`~MulensModel.trajectory.Trajectory`. If
you aren't using parallax, the time system shouldn't matter as
long as it is consistent across all MulensData and Model objects.
If you have multiple datasets, then you also need multiple instances
of MulensData class.
Keywords :
data_list: [*list* of *lists*, *numpy.ndarray*], optional
The list that contains three *lists* or *numpy.ndarrays*
that specify: time, magnitude or flux, and its uncertainty
(in that order). The lengths of these three objects must be
the same.
file_name: *str*, optional
The path to a file with columns: Date, Magnitude/Flux,
Err. Loaded using :py:func:`numpy.loadtxt()`. See ``**kwargs``.
**Either data_list or file_name is required.**
phot_fmt: *str*
Specifies whether the photometry is provided in magnitude or flux
space. Accepts either 'mag' or 'flux'. Default = 'mag'.
chi2_fmt: *str*
Specifies whether the format used for chi^2 calculation
should be done in Magnitude or Flux spaces. Accepts either
'mag' or 'flux'. Default is 'flux' because almost always
the errors are gaussian in flux space.
coords: *astropy.SkyCoord*, optional
sky coordinates of the event
ra, dec: *str*, optional
sky coordinates of the event
ephemerides_file: *str*, optional
Specify the ephemerides of a satellite over the period when
the data were taken. You may want to extend the time range
to get nicer plots. Will be interpolated as necessary to
model the satellite parallax effect. See instructions_ on
getting satellite positions.
Note that there is no check on time format (e.g., BJD TDB vs. HJD)
and it should be the same as in *data_list* or *file_name*.
add_2450000: *boolean*, optional
Adds 2450000 to the input dates. Useful if the dates
are supplied as HJD-2450000.
add_2460000: *boolean*, optional
Adds 2460000 to the input dates. Useful if the dates
are supplied as HJD-2460000.
bandpass: see :obj:`bandpass`
bad: *boolean np.ndarray*, optional
Flags for bad data (data to exclude from fitting and
plotting). Should be the same length as the number of data
points.
good: *boolean np.ndarray*, optional
Flags for good data, should be the same length as the
number of data points.
plot_properties: *dict*, optional
Specify properties for plotting, e.g. ``color``, ``marker``,
``label``, ``alpha``, ``zorder``, ``markersize``, ``visible``,
and also the ``show_bad`` and ``show_errorbars``
properties.
Note: pyplot functions errorbar() and scatter() are used to
plot data with errorbars and without them, respectively.
The type and size of marker are specified using different
keywords: ('fmt', 'markersize') for errorbar() and
('marker', 'size') for scatter(). You can use either convention
in :py:attr:`plot_properties` and they will be translated
to appropriate keywords. If there are similar problems with
other keywords, then they won't be translated unless you
contact code authors.
Other special keys :
show_errorbars: *boolean*, optional
Whether or not to show the errorbars for this dataset.
show_bad: *boolean*, optional
Whether or not to plot data points flagged as bad.
.. _instructions:
https://github.com/rpoleski/MulensModel/blob/master/documents/Horizons_manual.md
"""
def __init__(self, data_list=None, file_name=None,
phot_fmt="mag", chi2_fmt="flux",
coords=None, ra=None, dec=None,
ephemerides_file=None, add_2450000=False,
add_2460000=False, bandpass=None, bad=None, good=None,
plot_properties=None, **kwargs):
# Initialize some variables
self._n_epochs = None
self._horizons = None
self._satellite_skycoord = None
self._init_keys = {'add245': add_2450000, 'add246': add_2460000}
self._limb_darkening_weights = None
self.bandpass = bandpass
self._chi2_fmt = chi2_fmt
# Set the coords (if applicable)...
coords_msg = 'Must specify both or neither of ra and dec'
self._coords = None
# ...using coords keyword
if coords is not None:
self._coords = Coordinates(coords)
# ...using ra, dec keywords
if ra is not None:
if dec is not None:
self._coords = Coordinates(ra, dec)
else:
raise AttributeError(coords_msg)
else:
if ra is not None:
raise AttributeError(coords_msg)
# Plot properties
if plot_properties is None:
plot_properties = {}
self.plot_properties = plot_properties
# Import the photometry...
if data_list is not None and file_name is not None:
raise ValueError(
'MulensData cannot be initialized with both data_list and ' +
'file_name. Choose one or the other.')
elif data_list is not None:
# ...from an array
if len(kwargs) > 0:
raise ValueError('data_list and kwargs cannot be both set')
if len(data_list) != 3:
try:
msg0 = "\n" + str(data_list) + "\n"
except Exception:
msg0 = ""
msg = (msg0 + "\n" +
'MulensData was initialized with data_list of length ' +
'{:}, while length of 3 is expected (i.e. time, mag ' +
'or flux, and uncertainty).')
raise ValueError(msg.format(len(data_list)))
(vector_1, vector_2, vector_3) = list(data_list)
self._initialize(
phot_fmt, time=np.array(vector_1),
brightness=np.array(vector_2),
err_brightness=np.array(vector_3), coords=self._coords)
elif file_name is not None:
# ...from a file
usecols = kwargs.pop('usecols', (0, 1, 2))
if not exists(file_name):
raise FileNotFoundError(file_name)
try:
(vector_1, vector_2, vector_3) = np.loadtxt(
fname=file_name, unpack=True, usecols=usecols, **kwargs)
except Exception:
print("kwargs passed to np.loadtxt():")
print(kwargs)
print("usecols =", usecols)
print("File:", file_name)
raise
self._initialize(
phot_fmt, time=vector_1, brightness=vector_2,
err_brightness=vector_3, coords=self._coords)
# check if data label specified, if not use file_name
if 'label' not in self.plot_properties.keys():
if file_name is not None:
self.plot_properties['label'] = basename(file_name)
else:
self.plot_properties['label'] = 'a dataset'
else:
raise ValueError(
'MulensData cannot be initialized with ' +
'data_list or file_name')
if bad is not None and good is not None:
raise ValueError('Provide bad or good, but not both')
elif bad is not None:
self.bad = bad
elif good is not None:
self.good = good
else:
self.bad = self.n_epochs * [False]
# Set up satellite properties (if applicable)
self._ephemerides_file = ephemerides_file
def _initialize(self, phot_fmt, time=None, brightness=None,
err_brightness=None, coords=None):
"""
Internal function to import photometric data into the correct
form using a few numpy ndarrays.
Parameters:
phot_fmt - Specifies type of photometry. Either 'flux' or 'mag'.
time - Date vector of the data
brightness - vector of the photometric measurements
err_brightness - vector of the errors in the phot measurements.
coords - Sky coordinates of the event, optional
"""
if self._init_keys['add245'] and self._init_keys['add246']:
raise ValueError(
'You cannot initialize MulensData with both ' +
'add_2450000 and add_2460000 being True')
if time.dtype != np.float64:
raise TypeError((
'time vector in MulensData() must be of ' +
'numpy.float64 type, not {:}').format(time.dtype))
# Adjust the time vector as necessary.
if self._init_keys['add245']:
time += 2450000.
elif self._init_keys['add246']:
time += 2460000.
# Store the time vector
self._time = time
self._n_epochs = len(time)
# Check that the number of epochs equals the number of observations
if ((len(brightness) != self._n_epochs) or
(len(err_brightness) != self._n_epochs)):
raise ValueError('input data in MulensData have different lengths')
# Store the photometry
self._brightness_input = brightness
self._brightness_input_err = err_brightness
self._input_fmt = phot_fmt
# Create the complementary photometry (mag --> flux, flux --> mag)
if phot_fmt == "mag":
self._mag = self._brightness_input
self._err_mag = self._brightness_input_err
(self._flux, self._err_flux) = Utils.get_flux_and_err_from_mag(
mag=self.mag, err_mag=self.err_mag)
elif phot_fmt == "flux":
self._flux = self._brightness_input
self._err_flux = self._brightness_input_err
self._mag = None
self._err_mag = None
else:
msg = 'unknown brightness format in MulensData'
raise ValueError(msg)
def plot(self, phot_fmt=None, show_errorbars=None, show_bad=None,
subtract_2450000=False, subtract_2460000=False,
model=None, plot_residuals=False, **kwargs):
"""
Plot the data.
Uses :py:attr:`plot_properties` for label, color, etc.
This settings can be changed by setting ``**kwargs``.
You can plot in either flux or magnitude space. You can plot
data in a scale defined by other dataset -- pass *model* argument
and *model.data_ref* will be used as reference. Instead of plotting
data themselves, you can also plot the residuals of a *model*.
Keywords:
phot_fmt: *string* ('mag', 'flux')
Whether to plot the data in magnitudes or in flux. Default
is the same as :py:attr:`input_fmt`.
show_errorbars: *boolean*
If show_errorbars is True (default), plots with
matplotlib.errorbar(). If False, plots with
matplotlib.scatter().
show_bad: *boolean*
If False, bad data are suppressed (default).
If True, shows points marked as bad
(:py:obj:`mulensdata.MulensData.bad`) as 'x'
subtract_2450000, subtract_2460000: *boolean*
If True, subtracts 2450000 or 2460000 from the time
axis to get more human-scale numbers. If using it, make
sure to also set the same settings for all other
plotting calls (e.g. :py:func:`plot_lc()`).
model: :py:class:`~MulensModel.model.Model`
Model used to scale the data or calculate residuals
(if *plot_residuals* is *True*). If provided, then data are
scaled to *model.data_ref* dataset.
plot_residuals: *boolean*
If *True* then residuals are plotted (*model* is required).
Default is *False*, i.e., plot the data.
``**kwargs``: passed to matplotlib plotting functions.
"""
if phot_fmt is None:
phot_fmt = self.input_fmt
if phot_fmt not in ['mag', 'flux']:
raise ValueError('wrong value of phot_fmt: {:}'.format(phot_fmt))
if plot_residuals and model is None:
raise ValueError(
'MulensData.plot() requires model to plot residuals')
subtract = 0.
if subtract_2450000:
if subtract_2460000:
raise ValueError("subtract_2450000 and subtract_2460000 " +
"cannot be both True")
subtract = 2450000.
if subtract_2460000:
subtract = 2460000.
if show_errorbars is None:
show_errorbars = self.plot_properties.get('show_errorbars', True)
if show_bad is None:
show_bad = self.plot_properties.get('show_bad', False)
if model is None:
(y_value, y_err) = self._get_y_value_y_err(phot_fmt,
self.flux,
self.err_flux)
else:
if plot_residuals:
residuals = model.get_residuals(data_ref=model.data_ref,
type=phot_fmt, data=self)
y_value = residuals[0][0]
y_err = residuals[1][0]
else:
i_data_ref = model.data_ref
(f_source_0, f_blend_0) = model.get_ref_fluxes(
data_ref=model.datasets[i_data_ref])
(f_source, f_blend) = model.get_ref_fluxes(data_ref=self)
model.data_ref = i_data_ref
flux = f_source_0 * (self.flux - f_blend) / f_source
flux += f_blend_0
err_flux = f_source_0 * self.err_flux / f_source
(y_value, y_err) = self._get_y_value_y_err(phot_fmt,
flux, err_flux)
properties = self._set_plot_properties(
show_errorbars=show_errorbars, **kwargs)
properties_bad = self._set_plot_properties(
show_errorbars=show_errorbars, bad=True, **kwargs)
time_good = self.time[self.good] - subtract
time_bad = self.time[self.bad] - subtract
if show_errorbars:
container = self._plt_errorbar(time_good, y_value[self.good],
y_err[self.good], properties)
if show_bad:
if 'color' in properties_bad or 'c' in properties_bad:
pass
else:
properties_bad['color'] = container[0].get_color()
self._plt_errorbar(time_bad, y_value[self.bad],
y_err[self.bad], properties_bad)
else:
collection = self._plt_scatter(time_good, y_value[self.good],
properties)
if show_bad:
change = True
keys = ['c', 'color', 'facecolor', 'facecolors', 'edgecolors']
for key in keys:
change &= key not in properties_bad
if change:
properties_bad['color'] = collection.get_edgecolor()
self._plt_scatter(time_bad, y_value[self.bad], properties_bad)
if phot_fmt == 'mag':
(ymin, ymax) = plt.gca().get_ylim()
if ymax > ymin:
plt.gca().invert_yaxis()
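# Hedged usage sketch, not part of the original source: plotting the
# residuals of this dataset relative to a fitted model, with human-scale
# dates, e.g.
#   data.plot(phot_fmt='mag', model=my_model, plot_residuals=True,
#             subtract_2450000=True)
# where `my_model` is a placeholder for a MulensModel.Model instance.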
def _set_plot_properties(self, show_errorbars=True, bad=False, **kwargs):
"""
Set plot properties using ``**kwargs`` and
`py:plot_properties`. kwargs takes precedent.
Keywords:
show_errorbars: *boolean*
`True` means plotting done with plt.errorbar. `False`
means plotting done with plt.scatter.
bad: *boolean*
`True` means marker is default to 'x'. `False` means
marker is default to 'o'.
``**kwargs``: *dict*
Keywords accepted by plt.errorbar or plt.scatter.
"""
if show_errorbars:
marker_key = 'fmt'
size_key = 'markersize' # In plt.errorbar(), 'ms' is equivalent.
else:
marker_key = 'marker'
size_key = 's'
marker_keys_all = ['marker', 'fmt']
size_keys_all = ['markersize', 'ms', 's']
# Some older versions of matplotlib have problems when both
# 'fmt' and 'color' are specified. Below we take a list of formats
# from Notes section of:
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html
if 'fmt' in kwargs:
for char in ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']:
if char in kwargs['fmt']:
kwargs['fmt'] = kwargs['fmt'].replace(char, "")
kwargs['color'] = char
properties = {}
# Overwrite dataset settings (i.e., self.plot_properties) with kwargs.
for dictionary in [self.plot_properties, kwargs]:
for (key, value) in dictionary.items():
if key in marker_keys_all:
properties[marker_key] = value
elif key in size_keys_all:
properties[size_key] = value
else:
properties[key] = value
if bad:
properties[marker_key] = 'x'
elif marker_key not in properties.keys():
properties[marker_key] = 'o'
if size_key not in properties.keys():
properties[size_key] = 5
for remove_key in ['show_bad', 'show_errorbars']:
properties.pop(remove_key, None)
return properties
def _plt_errorbar(self, time, y, yerr, kwargs):
"""
safe call to matplotlib.pyplot.errorbar(): on failure, print the kwargs and re-raise; returns ErrorbarContainer
"""
try:
container = plt.errorbar(time, y, yerr=yerr, **kwargs)
except Exception:
print("kwargs passed to plt.errorbar():")
print(kwargs)
raise
return container
def _plt_scatter(self, time, y, kwargs):
"""
safe call to matplotlib.pyplot.scatter(): on failure, print the kwargs and re-raise; returns PathCollection
"""
try:
collection = plt.scatter(time, y, **kwargs)
except Exception:
print("kwargs passed to plt.scatter():")
print(kwargs)
raise
return collection
def _get_y_value_y_err(self, phot_fmt, flux, flux_err):
"""
convert to magnitudes if phot_fmt is 'mag'; otherwise return the input fluxes and errors unchanged
"""
if phot_fmt == 'mag':
return Utils.get_mag_and_err_from_flux(flux, flux_err)
else:
return (flux, flux_err)
def set_limb_darkening_weights(self, weights):
"""
Save a dictionary of weights that will be used to evaluate the
limb darkening coefficient. See also
:py:class:`~MulensModel.limbdarkeningcoeffs.LimbDarkeningCoeffs`
Parameters :
weights: *dict*
A dictionary that specifies weight for each
bandpass. Keys are *str* and values are *float*, e.g.,
``{'I': 1.5, 'V': 1.}`` if the I-band gamma
limb-darkening coefficient is 1.5-times larger than
the V-band.
"""
if self.bandpass is not None:
raise ValueError(
"Don't try to run MMulensData.set_limb_darkening_weights() " +
"after bandpass was provided")
if not isinstance(weights, dict):
raise TypeError(
"MulensData.set_limb_darkening_weights() " +
"parameter has to be dict, not {:}".format(type(weights)))
self._limb_darkening_weights = weights
@property
def coords(self):
"""
:py:class:`~MulensModel.coordinates.Coordinates`
Sky coordinates of data.
See :py:class:`~MulensModel.coordinates.Coordinates`.
"""
return self._coords
@coords.setter
def coords(self, new_value):
self._coords = Coordinates(new_value)
@property
def time(self):
"""
*np.ndarray*
vector of dates
"""
return self._time
@property
def mag(self):
"""
*np.ndarray*
magnitude vector
"""
if self._mag is None:
(self._mag, self._err_mag) = Utils.get_mag_and_err_from_flux(
flux=self.flux, err_flux=self.err_flux)
return self._mag
@property
def err_mag(self):
"""
*np.ndarray*
vector of magnitude errors
"""
if self._err_mag is None:
self.mag  # accessing mag triggers computation of self._err_mag as well
return self._err_mag
@property
def flux(self):
"""
*numpy.ndarray*
Vector of the measured brightness in flux units.
"""
if self._flux is None:
(self._flux, self._err_flux) = Utils.get_flux_and_err_from_mag(
mag=self.mag, err_mag=self.err_mag)
return self._flux
@property
def err_flux(self):
"""
*np.ndarray*
Vector of uncertainties of *flux* values.
"""
if self._err_flux is None:
self.flux  # accessing flux triggers computation of self._err_flux as well
return self._err_flux
@property
def bad(self):
"""
*np.ndarray boolean*
flags marking bad data
"""
return self._bad
@bad.setter
def bad(self, new_value):
new_value = np.asarray(new_value)
if new_value.dtype != np.dtype('bool'):
raise TypeError("MulensData.bad has to be a boolean numpy array")
self._bad = new_value
self._good = np.logical_not(self._bad)
@property
def good(self):
"""
*np.ndarray boolean*
flags marking good data, i.e., the opposite of :py:attr:`bad`
"""
return self._good
@good.setter
def good(self, new_value):
new_value = np.asarray(new_value)
if new_value.dtype != np.dtype('bool'):
raise TypeError("MulensData.good has to be a boolean numpy array")
self._good = new_value
self._bad = np.logical_not(self._good)
@property
def n_epochs(self):
"""
*int*
total number of epochs (including bad data)
"""
return self._n_epochs
def data_and_err_in_input_fmt(self):
"""
Gives photometry in input format (mag or flux).
Returns :
data: *np.ndarray*
Magnitudes or fluxes
data_err: *np.ndarray*
Uncertainties of magnitudes or of fluxes
"""
if self.input_fmt == "mag":
data = self.mag
err_data = self.err_mag
elif self.input_fmt == "flux":
data = self.flux
err_data = self.err_flux
else:
raise ValueError('Unrecognized data format: {:}'.format(
self.input_fmt))
return (data, err_data)
def data_and_err_in_chi2_fmt(self):
"""
Gives photometry in format used for chi2 calculation
(flux in most cases, but magnitude possible).
Returns :
data: *np.ndarray*
Magnitudes or fluxes
data_err: *np.ndarray*
Uncertainties of magnitudes or of fluxes
"""
if self.chi2_fmt == "mag":
data = self.mag
err_data = self.err_mag
elif self.chi2_fmt == "flux":
data = self.flux
err_data = self.err_flux
else:
raise ValueError('Unrecognized data format: {:}'.format(
self.chi2_fmt))
return (data, err_data)
@property
def bandpass(self):
"""
*String*
Bandpass of given dataset (primary usage is limb darkening), e.g. 'I'
or 'V'. Returns *None* if not set.
"""
return self._bandpass
@bandpass.setter
def bandpass(self, value):
if self._limb_darkening_weights is not None:
raise ValueError(
"Limb darkening weights were already set - you" +
"cannot bandpass now.")
self._bandpass = value
@property
def satellite_skycoord(self):
"""
*Astropy.SkyCoord* object for satellite
positions at epochs covered by the dataset
Returns :
skycoord: *astropy.coordinates.SkyCoord*
satellite positions at epochs covered by the dataset
"""
if self.ephemerides_file is None:
raise ValueError('ephemerides_file is not defined.')
if self._satellite_skycoord is None:
satellite_skycoord = SatelliteSkyCoord(
ephemerides_file=self.ephemerides_file)
self._satellite_skycoord = satellite_skycoord.get_satellite_coords(
self._time)
return self._satellite_skycoord
@property
def input_fmt(self):
"""
*str* ('mag' or 'flux')
Input format - same as *phot_fmt* keyword in __init__().
"""
return self._input_fmt
@property
def chi2_fmt(self):
"""
*str* ('mag' or 'flux')
Photometry format used for chi^2 calculations. Default is 'flux'.
"""
return self._chi2_fmt
@property
def ephemerides_file(self):
"""
*str*
File with satellite ephemeris.
"""
return self._ephemerides_file
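# Usage sketch for the class above (illustrative only -- the file name, the
# 0.1 mag error cut, and the chosen plotting options are assumptions):
#     import matplotlib.pyplot as plt
#     from MulensModel import MulensData
#     data = MulensData(file_name="phot.dat", phot_fmt="mag")
#     data.bad = data.err_mag > 0.1        # boolean numpy array, as required
#     data.plot(phot_fmt="mag", show_errorbars=True, show_bad=True,
#               subtract_2450000=True)
#     plt.show()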
| mit |
madgik/exareme | Exareme-Docker/src/mip-algorithms/CART/init/1/local.py | 1 | 2668 | from __future__ import division
from __future__ import print_function
import sys
from os import path
from argparse import ArgumentParser
import sqlite3
import json
import pandas as pd
import time
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + '/utils/')
sys.path.append(path.dirname(path.dirname(path.dirname(path.dirname(path.abspath(__file__))))) + '/CART/')
from algorithm_utils import query_database, variable_categorical_getDistinctValues, StateData, PrivacyError, PRIVACY_MAGIC_NUMBER
from cart_lib import CartInit_Loc2Glob_TD, cart_init_1_local
def main(args):
t1 = time.localtime(time.time())
# Parse arguments
sys.argv = args
parser = ArgumentParser()
parser.add_argument('-x', required=True, help='Independent variable names, comma separated.')
parser.add_argument('-y', required=True, help='Dependent variable name')
parser.add_argument('-cur_state_pkl', required=True, help='Path to the pickle file holding the current state.')
parser.add_argument('-input_local_DB', required=True, help='Path to local db.')
parser.add_argument('-db_query', required=True, help='Query to be executed on local db.')
args, unknown = parser.parse_known_args()
query = args.db_query
fname_cur_state = path.abspath(args.cur_state_pkl)
fname_loc_db = path.abspath(args.input_local_DB)
query = query.replace("\\\"","\"")
# Get variable
args_X = list(args.x.replace(' ', '').split(','))
args_Y = [args.y.replace(' ', '')]
#1. Query database and metadata
queryMetadata = "select * from metadata where code in (" + "'" + "','".join(args_X) + "','" + "','".join(args_Y) + "'" + ");"
dataSchema, metadataSchema, metadata, dataFrame = query_database(fname_db=fname_loc_db, queryData=query, queryMetadata=queryMetadata)
CategoricalVariables = variable_categorical_getDistinctValues(metadata)
#2. Run algorithm
dataFrame, CategoricalVariables = cart_init_1_local(dataFrame, dataSchema, CategoricalVariables)
if len(dataFrame) < PRIVACY_MAGIC_NUMBER:
raise PrivacyError('The Experiment could not run with the input provided because there are insufficient data.')
#3. Save local state
local_state = StateData( dataFrame = dataFrame,
args_X = args_X,
args_Y = args_Y,
CategoricalVariables = CategoricalVariables)
local_state.save(fname = fname_cur_state)
# Transfer local output
local_out = CartInit_Loc2Glob_TD(args_X, args_Y, CategoricalVariables, t1)
local_out.transfer()
if __name__ == '__main__':
main(sys.argv)
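# Invocation sketch (the paths, variable names, and query below are
# illustrative assumptions; only the flag names come from the parser above):
#     main(['local.py',
#           '-x', 'var1,var2', '-y', 'target',
#           '-cur_state_pkl', '/tmp/state.pkl',
#           '-input_local_DB', '/tmp/local.db',
#           '-db_query', 'select * from data;'])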
| mit |
bakkou-badri/dataminingproject | env/lib/python2.7/site-packages/numpy/lib/twodim_base.py | 9 | 24965 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
__all__ = ['diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices',
'tril_indices', 'tril_indices_from', 'triu_indices', 'triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty, diagonal
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Does not require the array to be
two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
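# Note on the assignment above: writing with stride M+1 into the flattened view
# touches one element per row, i.e. the k-th diagonal.  For example, with
# N = M = 3 and k = 1 the start index is i = 1 and the stride is 4, so flat
# positions 1 and 5 -- elements (0, 1) and (1, 2) -- are set to one.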
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triange of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return v.diagonal(k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
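# Note on diagflat above: for k >= 0, element i of the flattened input lands at
# row i, column i + k, whose flat index in the (n, n) output is i*n + i + k,
# which is exactly fi = i + k + i*n.  For k < 0 the rows are shifted instead,
# giving fi = i + (i - k)*n, i.e. row i - k, column i.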
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i, j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal(subtract.outer(arange(N), arange(M)), -k)
return m.astype(dtype)
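# Note on tri above: subtract.outer(arange(N), arange(M))[i, j] equals i - j,
# and i - j >= -k is equivalent to j <= i + k, i.e. the element lies on or
# below the k-th diagonal -- the condition stated in the docstring.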
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype), m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, dtype=m.dtype)), m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
Generate a Van der Monde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the `i`-th output column is the input vector
raised element-wise to the power of ``N - i - 1``. Such a matrix with
a geometric progression in each row is named for Alexandre-Theophile
Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None:
N=len(x)
X = ones( (len(x), N), x.dtype)
for i in range(N - 1):
X[:, i] = x**(N - i - 1)
return X
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow:\nequidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh:\nexact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage:\ninterpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
The row dimension of the square arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0], k)
def triu_indices(n, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of a (N, N) array.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0], k)
| gpl-2.0 |
Yurlungur/lagrange-interpolation | plot_convergence_1d.py | 1 | 1069 | #!/usr/bin/env python
from __future__ import print_function
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import sys
NX = [11,21,41]
PLOT_NAME = "images/convergence_slice."
linewidth=3
fontsize=12
fontsize_labels=16
mpl.rcParams.update({'font.size': fontsize})
if __name__ == "__main__":
if len(sys.argv) < 3:
print("{} ORDER_1D FILENAME".format(sys.argv[0]))
quit()
order = float(sys.argv[1])
filename = sys.argv[2]
data = np.loadtxt(filename)
x = data[...,0]
errors = data[...,1:].transpose()
dx = [(np.max(x) - np.min(x)) / (r - 1) for r in NX]  # list, so dx[i] also works on Python 3
for i in range(errors.shape[0]):
e = errors[i]
plt.plot(x,e/(dx[i]**order),
label='nx = {}'.format(NX[i]),
lw=linewidth)
plt.legend()
plt.xlabel('x',fontsize=fontsize_labels)
plt.ylabel(r'error$/dx^{%f}$' % order,
fontsize=fontsize_labels)
for extension in ["png","pdf"]:
plt.savefig(PLOT_NAME+extension,
bbox_inches='tight')
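# Note on the rescaling above: if the interpolation error behaves like
# C * dx**order, then error / dx**order is roughly resolution independent and
# the curves for nx = 11, 21, 41 should overlap.  Tiny synthetic check
# (values are illustrative, not produced by this repository):
#     dx = np.array([0.1, 0.05, 0.025])
#     err = 3.0 * dx**4        # pretend a 4th-order scheme with constant C = 3
#     err / dx**4              # -> array([ 3., 3., 3.]), i.e. the curves collapse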
| mit |
vmonaco/general-hough | src/GeneralHough.py | 1 | 4517 | '''
Created on May 19, 2013
@author: vinnie
'''
import os
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from scipy.misc import imread
from skimage.filter import canny
from scipy.ndimage.filters import sobel
# Good for the b/w test images used
MIN_CANNY_THRESHOLD = 10
MAX_CANNY_THRESHOLD = 50
def gradient_orientation(image):
'''
Calculate the gradient orientation for edge point in the image
'''
dx = sobel(image, axis=0, mode='constant')
dy = sobel(image, axis=1, mode='constant')
gradient = np.arctan2(dy,dx) * 180 / np.pi
return gradient
def build_r_table(image, origin):
'''
Build the R-table from the given shape image and a reference point
'''
edges = canny(image, low_threshold=MIN_CANNY_THRESHOLD,
high_threshold=MAX_CANNY_THRESHOLD)
gradient = gradient_orientation(edges)
r_table = defaultdict(list)
for (i,j),value in np.ndenumerate(edges):
if value:
r_table[gradient[i,j]].append((origin[0]-i, origin[1]-j))
return r_table
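# Note on build_r_table above: the R-table maps gradient orientation to the
# list of offsets (origin - edge_pixel) measured on the template; during
# detection, each query edge pixel with the same orientation votes for
# candidate reference points at pixel + offset (see accumulate_gradients).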
def accumulate_gradients(r_table, grayImage):
'''
Perform a General Hough Transform with the given image and R-table
'''
edges = canny(grayImage, low_threshold=MIN_CANNY_THRESHOLD,
high_threshold=MAX_CANNY_THRESHOLD)
gradient = gradient_orientation(edges)
accumulator = np.zeros(grayImage.shape)
for (i,j),value in np.ndenumerate(edges):
if value:
for r in r_table[gradient[i,j]]:
accum_i, accum_j = i+r[0], j+r[1]
if accum_i < accumulator.shape[0] and accum_j < accumulator.shape[1]:
accumulator[accum_i, accum_j] += 1
return accumulator
def general_hough_closure(reference_image):
'''
Generator function to create a closure with the reference image and origin
at the center of the reference image
Returns a function f, which takes a query image and returns the accumulator
'''
referencePoint = (reference_image.shape[0] // 2, reference_image.shape[1] // 2)  # integer division keeps indices integral
r_table = build_r_table(reference_image, referencePoint)
def f(query_image):
return accumulate_gradients(r_table, query_image)
return f
def n_max(a, n):
'''
Return the N max elements and indices in a
'''
indices = a.ravel().argsort()[-n:]
indices = (np.unravel_index(i, a.shape) for i in indices)
return [(a[i], i) for i in indices]
def test_general_hough(gh, reference_image, query):
'''
Uses a GH closure to detect shapes in an image and create nice output
'''
query_image = imread(query, flatten=True)
accumulator = gh(query_image)
plt.clf()
plt.gray()
fig = plt.figure()
fig.add_subplot(2,2,1)
plt.title('Reference image')
plt.imshow(reference_image)
fig.add_subplot(2,2,2)
plt.title('Query image')
plt.imshow(query_image)
fig.add_subplot(2,2,3)
plt.title('Accumulator')
plt.imshow(accumulator)
fig.add_subplot(2,2,4)
plt.title('Detection')
plt.imshow(query_image)
# top 5 results in red
m = n_max(accumulator, 5)
y_points = [pt[1][0] for pt in m]
x_points = [pt[1][1] for pt in m]
plt.scatter(x_points, y_points, marker='o', color='r')
# top result in yellow
i,j = np.unravel_index(accumulator.argmax(), accumulator.shape)
plt.scatter([j], [i], marker='x', color='y')
d,f = os.path.split(query)[0], os.path.splitext(os.path.split(query)[1])[0]
plt.savefig(os.path.join(d, f + '_output.png'))
return
def test():
reference_image = imread("../images/s.png", flatten=True)
detect_s = general_hough_closure(reference_image)
test_general_hough(detect_s, reference_image, "../images/s_test.png")
reference_image = imread("../images/diamond.png", flatten=True)
detect_s = general_hough_closure(reference_image)
test_general_hough(detect_s, reference_image, "../images/diamond_test1.png")
test_general_hough(detect_s, reference_image, "../images/diamond_test2.png")
test_general_hough(detect_s, reference_image, "../images/diamond_test3.png")
test_general_hough(detect_s, reference_image, "../images/diamond_test4.png")
test_general_hough(detect_s, reference_image, "../images/diamond_test5.png")
test_general_hough(detect_s, reference_image, "../images/diamond_test6.png")
if __name__ == '__main__':
test()
| mit |
great-expectations/great_expectations | tests/data_context/store/test_checkpoint_store.py | 1 | 7393 | import logging
from pathlib import Path
from typing import Dict, List, Union
import pytest
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.data_context.store import CheckpointStore
from great_expectations.data_context.types.base import CheckpointConfig
from great_expectations.data_context.types.resource_identifiers import (
ConfigurationIdentifier,
)
from great_expectations.util import filter_properties_dict, gen_directory_tree_str
from tests.test_utils import build_checkpoint_store_using_filesystem
logger = logging.getLogger(__name__)
def test_checkpoint_store(empty_data_context):
store_name: str = "checkpoint_store"
base_directory: str = str(Path(empty_data_context.root_directory) / "checkpoints")
checkpoint_store: CheckpointStore = build_checkpoint_store_using_filesystem(
store_name=store_name,
base_directory=base_directory,
overwrite_existing=True,
)
assert len(checkpoint_store.list_keys()) == 0
with pytest.raises(TypeError):
checkpoint_store.set(
key="my_first_checkpoint", value="this is not a checkpoint"
)
assert len(checkpoint_store.list_keys()) == 0
checkpoint_name_0: str = "my_checkpoint_0"
run_name_template_0: str = "%Y-%M-my-run-template-$VAR"
validations_0: Union[List, Dict] = [
{
"batch_request": {
"datasource_name": "my_pandas_datasource",
"data_connector_name": "my_runtime_data_connector",
"data_asset_name": "my_website_logs",
},
"action_list": [
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "StoreEvaluationParametersAction",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
],
}
]
expectation_suite_name_0: str = "my.test.expectation_suite.name"
evaluation_parameters_0: dict = {
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
}
runtime_configuration_0: dict = {
"result_format": {
"result_format": "BASIC",
"partial_unexpected_count": 20,
},
}
my_checkpoint_config_0: CheckpointConfig = CheckpointConfig(
name=checkpoint_name_0,
run_name_template=run_name_template_0,
expectation_suite_name=expectation_suite_name_0,
evaluation_parameters=evaluation_parameters_0,
runtime_configuration=runtime_configuration_0,
validations=validations_0,
)
key_0: ConfigurationIdentifier = ConfigurationIdentifier(
configuration_key=checkpoint_name_0,
)
checkpoint_store.set(key=key_0, value=my_checkpoint_config_0)
assert len(checkpoint_store.list_keys()) == 1
assert filter_properties_dict(
properties=checkpoint_store.get(key=key_0).to_json_dict(),
clean_falsy=True,
) == filter_properties_dict(
properties=my_checkpoint_config_0.to_json_dict(),
clean_falsy=True,
)
dir_tree: str = gen_directory_tree_str(startpath=base_directory)
assert (
dir_tree
== """checkpoints/
.ge_store_backend_id
my_checkpoint_0.yml
"""
)
checkpoint_name_1: str = "my_checkpoint_1"
run_name_template_1: str = "%Y-%M-my-run-template-$VAR"
validations_1: Union[List, Dict] = [
{
"action_list": [
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
},
},
{
"name": "store_evaluation_params",
"action": {
"class_name": "StoreEvaluationParametersAction",
},
},
{
"name": "update_data_docs",
"action": {
"class_name": "UpdateDataDocsAction",
},
},
]
}
]
expectation_suite_name_1: str = "my.test.expectation_suite.name"
batch_request_1: dict = {
"datasource_name": "my_pandas_datasource",
"data_connector_name": "my_runtime_data_connector",
"data_asset_name": "my_website_logs",
}
evaluation_parameters_1: dict = {
"environment": "$GE_ENVIRONMENT",
"tolerance": 1.0e-2,
"aux_param_0": "$MY_PARAM",
"aux_param_1": "1 + $MY_PARAM",
}
runtime_configuration_1: dict = {
"result_format": {
"result_format": "BASIC",
"partial_unexpected_count": 20,
},
}
my_checkpoint_config_1: CheckpointConfig = CheckpointConfig(
name=checkpoint_name_1,
run_name_template=run_name_template_1,
expectation_suite_name=expectation_suite_name_1,
batch_request=batch_request_1,
evaluation_parameters=evaluation_parameters_1,
runtime_configuration=runtime_configuration_1,
validations=validations_1,
)
key_1: ConfigurationIdentifier = ConfigurationIdentifier(
configuration_key=checkpoint_name_1,
)
checkpoint_store.set(key=key_1, value=my_checkpoint_config_1)
assert len(checkpoint_store.list_keys()) == 2
assert filter_properties_dict(
properties=checkpoint_store.get(key=key_1).to_json_dict(),
clean_falsy=True,
) == filter_properties_dict(
properties=my_checkpoint_config_1.to_json_dict(),
clean_falsy=True,
)
dir_tree: str = gen_directory_tree_str(startpath=base_directory)
assert (
dir_tree
== """checkpoints/
.ge_store_backend_id
my_checkpoint_0.yml
my_checkpoint_1.yml
"""
)
self_check_report: dict = convert_to_json_serializable(
data=checkpoint_store.self_check()
)
assert self_check_report == {
"keys": ["my_checkpoint_0", "my_checkpoint_1"],
"len_keys": 2,
"config": {
"store_name": "checkpoint_store",
"class_name": "CheckpointStore",
"module_name": "great_expectations.data_context.store.checkpoint_store",
"overwrite_existing": True,
"store_backend": {
"base_directory": f"{empty_data_context.root_directory}/checkpoints",
"platform_specific_separator": True,
"fixed_length_key": False,
"suppress_store_backend_id": False,
"module_name": "great_expectations.data_context.store.tuple_store_backend",
"class_name": "TupleFilesystemStoreBackend",
"filepath_template": "{0}.yml",
},
},
}
checkpoint_store.remove_key(key=key_0)
checkpoint_store.remove_key(key=key_1)
assert len(checkpoint_store.list_keys()) == 0
| apache-2.0 |
tcheehow/MissionPlanner | Lib/site-packages/numpy/fft/fftpack.py | 59 | 39653 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
except(KeyError):
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
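# Note on _raw_fft above: the twiddle-factor table returned by init_function is
# cached per transform length n; the input is cropped or zero-padded along
# `axis` to length n, and the FFTPACK work_function is applied along the last
# axis (hence the swapaxes calls around it).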
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
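# Note on _cook_nd_args above: it normalizes the (s, axes) pair used by the
# n-dimensional transforms -- axes defaults to the last len(s) axes and s to
# the input shape -- and, for the inverse real transform with no explicit
# shape, restores the full output length 2*(m - 1) along the last axis.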
def _raw_fftnd(a, s=None, axes=None, function=fft):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
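# Note on _raw_fftnd above: the n-dimensional transform is built by applying
# the given one-dimensional transform along each requested axis in turn,
# iterating over the axes in reverse order.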
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
    return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
    return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
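    Examples
    --------
    A minimal sketch of the relationship stated above (for a 2-D array the
    default axes of `rfft2` and `rfftn` coincide; the random data is only
    illustrative):
    >>> a = np.random.rand(4, 4)  # illustrative shape
    >>> np.allclose(np.fft.rfft2(a), np.fft.rfftn(a))
    True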
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
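    Examples
    --------
    A minimal round-trip sketch (exact up to floating point error; an even
    length along the last axis is assumed so the default output shape
    matches the original input):
    >>> a = np.random.rand(4, 6)  # illustrative shape; last axis even
    >>> np.allclose(np.fft.irfft2(np.fft.rfft2(a)), a)
    True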
"""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
| gpl-3.0 |
dennisobrien/bokeh | bokeh/util/hex.py | 3 | 8197 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions useful for dealing with hexagonal tilings.
For more information on the concepts employed here, see this informative page
https://www.redblobgames.com/grids/hexagons/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import numpy as np
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def axial_to_cartesian(q, r, size, orientation, aspect_scale=1):
''' Map axial *(q,r)* coordinates to cartesian *(x,y)* coordinates of
tiles centers.
This function can be useful for positioning other Bokeh glyphs with
    cartesian coordinates in relation to a hex tiling.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#hex-to-pixel
Args:
q (array[float]) :
A NumPy array of q-coordinates for binning
r (array[float]) :
A NumPy array of r-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
if orientation == "pointytop":
x = size * np.sqrt(3) * (q + r/2.0) / aspect_scale
y = -size * 3/2.0 * r
else:
x = size * 3/2.0 * q
y = -size * np.sqrt(3) * (r + q/2.0) * aspect_scale
return (x, y)
def cartesian_to_axial(x, y, size, orientation, aspect_scale=1):
    ''' Map Cartesian *(x,y)* points to axial *(q,r)* coordinates of enclosing
tiles.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#pixel-to-hex
Args:
x (array[float]) :
A NumPy array of x-coordinates to convert
y (array[float]) :
A NumPy array of y-coordinates to convert
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
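    A minimal round-trip sketch (integer axial coordinates map to tile
    centers and back to themselves; the coordinate values are illustrative):
    >>> import numpy as np
    >>> q, r = np.array([0, 1, 2]), np.array([0, -1, 1])  # illustrative tiles
    >>> x, y = axial_to_cartesian(q, r, 1, "pointytop")
    >>> q2, r2 = cartesian_to_axial(x, y, 1, "pointytop")
    >>> bool(np.all(q2 == q) and np.all(r2 == r))
    True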
'''
HEX_FLAT = [2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0]
HEX_POINTY = [np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0]
coords = HEX_FLAT if orientation == 'flattop' else HEX_POINTY
x = x / size * (aspect_scale if orientation == "pointytop" else 1)
y = -y / size / (aspect_scale if orientation == "flattop" else 1)
q = coords[0] * x + coords[1] * y
r = coords[2] * x + coords[3] * y
return _round_hex(q, r)
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1):
''' Perform an equal-weight binning of data points into hexagonal tiles.
    For more sophisticated use cases, e.g. weighted binning or scaling
    individual tiles proportional to some other quantity, consider using
HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates for binning
y (array[float]) :
A NumPy array of y-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str, optional) :
Whether the hex tile orientation should be "pointytop" or
"flattop". (default: "pointytop")
aspect_scale (float, optional) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
            regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Returns:
DataFrame
The resulting DataFrame will have columns *q* and *r* that specify
hexagon tile locations in axial coordinates, and a column *counts* that
provides the count for each tile.
.. warning::
Hex binning only functions on linear scales, i.e. not on log plots.
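    A minimal usage sketch (requires pandas; the individual *q*, *r* values
    depend on the chosen ``size`` and orientation, so only aggregate
    properties are checked here):
    >>> import numpy as np
    >>> x = np.array([0.0, 0.1, 5.0])  # illustrative data
    >>> y = np.array([0.0, 0.1, 5.0])
    >>> bins = hexbin(x, y, 1)  # size=1 is an arbitrary choice
    >>> sorted(bins.columns)
    ['counts', 'q', 'r']
    >>> int(bins['counts'].sum())
    3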
'''
    pd = import_required('pandas', 'hexbin requires pandas to be installed')
q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
df = pd.DataFrame(dict(r=r, q=q))
return df.groupby(['q', 'r']).size().reset_index(name='counts')
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _round_hex(q, r):
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
r (array[float]) :
            NumPy array of Floating point axial *r* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
    q = np.where(cond, -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Features/multiple_features/best_kNN_PC/cross_validate_objects_kNN_PC_BMED_8813_HAP_scaled_method_II_area_shape.py | 1 | 4401 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 82:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[41:82,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((90,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
aflaxman/scikit-learn | sklearn/preprocessing/data.py | 7 | 94754 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from __future__ import division
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import string_types
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import (check_is_fitted, check_random_state,
FLOAT_DTYPES)
BOUNDS_THRESHOLD = 1e-7
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'QuantileTransformer',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'quantile_transform',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
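    Examples
    --------
    A minimal sketch of the default behaviour (each feature is centered and
    scaled to unit variance; the checks hold up to floating point error and
    the data values are illustrative):
    >>> import numpy as np
    >>> from sklearn.preprocessing import scale
    >>> X_scaled = scale(np.array([[1., 2.], [3., 6.], [5., 10.]]))  # illustrative data
    >>> np.allclose(X_scaled.mean(axis=0), 0.)
    True
    >>> np.allclose(X_scaled.std(axis=0), 1.)
    True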
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
Examples
--------
>>> from sklearn.preprocessing import MinMaxScaler
>>>
>>> data = [[-1, 2], [-0.5, 6], [0, 10], [1, 18]]
>>> scaler = MinMaxScaler()
>>> print(scaler.fit(data))
MinMaxScaler(copy=True, feature_range=(0, 1))
>>> print(scaler.data_max_)
[ 1. 18.]
>>> print(scaler.transform(data))
[[ 0. 0. ]
[ 0.25 0.25]
[ 0.5 0.5 ]
[ 1. 1. ]]
>>> print(scaler.transform([[2, 2]]))
[[ 1.5 0. ]]
See also
--------
minmax_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
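    Examples
    --------
    A minimal sketch (each feature is mapped onto the default ``(0, 1)``
    range; the data values are illustrative):
    >>> import numpy as np
    >>> from sklearn.preprocessing import minmax_scale
    >>> X = np.array([[1., -2.], [3., 0.], [5., 2.]])  # illustrative data
    >>> np.allclose(minmax_scale(X), [[0., 0.], [0.5, 0.5], [1., 1.]])
    True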
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than that of others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
Examples
--------
>>> from sklearn.preprocessing import StandardScaler
>>>
>>> data = [[0, 0], [0, 0], [1, 1], [1, 1]]
>>> scaler = StandardScaler()
>>> print(scaler.fit(data))
StandardScaler(copy=True, with_mean=True, with_std=True)
>>> print(scaler.mean_)
[ 0.5 0.5]
>>> print(scaler.transform(data))
[[-1. -1.]
[-1. -1.]
[ 1. 1.]
[ 1. 1.]]
>>> print(scaler.transform([[2, 2]]))
[[ 3. 3.]]
See also
--------
scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
        The American Statistician 37.3 (1983): 242-247.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
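        Examples
        --------
        A minimal sketch of incremental use (two batches yield the same
        statistics as a single ``fit``, up to floating point error; the data
        values are illustrative):
        >>> import numpy as np
        >>> from sklearn.preprocessing import StandardScaler
        >>> X = np.arange(10, dtype=float).reshape(5, 2)  # illustrative data
        >>> s1 = StandardScaler().fit(X)
        >>> s2 = StandardScaler().partial_fit(X[:3]).partial_fit(X[3:])
        >>> np.allclose(s1.mean_, s2.mean_) and np.allclose(s1.var_, s2.var_)
        True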
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y='deprecated', copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
copy : bool, optional (default: None)
Copy the input X or not.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Transformed array.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
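    Examples
    --------
    A minimal sketch (after fitting, the largest absolute value of every
    feature equals 1; the data values are illustrative):
    >>> import numpy as np
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> X = np.array([[1., -2.], [2., 4.], [-4., 1.]])  # illustrative data
    >>> Xt = MaxAbsScaler().fit_transform(X)
    >>> np.allclose(np.abs(Xt).max(axis=0), 1.)
    True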
See also
--------
maxabs_scale: Equivalent function without the estimator API.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
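    Examples
    --------
    A minimal sketch (each feature is divided by its maximum absolute value;
    the data values are illustrative):
    >>> import numpy as np
    >>> from sklearn.preprocessing import maxabs_scale
    >>> X = np.array([[1., -2.], [2., 4.], [-4., 1.]])  # illustrative data
    >>> np.allclose(maxabs_scale(X), [[0.25, -0.5], [0.5, 1.], [-1., 0.25]])
    True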
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the ``axis`` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the ``transform``
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This will cause ``transform`` to raise an exception when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the estimator API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X):
"""Center and scale the data.
Can be called on sparse input, provided that ``RobustScaler`` has been
fitted to dense input and ``with_centering=False``.
Parameters
----------
X : {array-like, sparse matrix}
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
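# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). A single gross outlier barely affects
# the median/IQR statistics that RobustScaler relies on.
def _demo_robust_scaler():  # pragma: no cover - illustrative only
    X = np.array([[1.], [2.], [3.], [4.], [100.]])
    # The median (3.) is removed and the IQR (2. here) is used for scaling,
    # so the inliers keep a sensible scale despite the outlier.
    print(RobustScaler().fit_transform(X))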
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
    Center to the median and scale component-wise
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
    Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only quantile-range scaling will be
    performed on the features of the CSR matrix) or to call `X.toarray()`
    if the materialized dense array is expected to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data.
Returns
-------
self : instance
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
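# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). get_feature_names makes the expansion
# produced by PolynomialFeatures easy to inspect.
def _demo_polynomial_feature_names():  # pragma: no cover - illustrative only
    poly = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
    # Expected: ['1', 'a', 'b', 'a^2', 'a b', 'b^2']
    print(poly.get_feature_names(['a', 'b']))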
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
        unnecessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
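# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). return_norm=True also returns the
# per-row norms that were divided out.
def _demo_normalize():  # pragma: no cover - illustrative only
    X = np.array([[3., 4.], [1., 0.]])
    Xn, norms = normalize(X, norm='l2', return_norm=True)
    print(Xn)     # each row now has unit Euclidean length
    print(norms)  # [5. 1.]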
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
See also
--------
normalize: Equivalent function without the estimator API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an unnecessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool, optional (default: None)
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
        unnecessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
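# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). Values strictly above the threshold
# map to 1, everything else to 0.
def _demo_binarize():  # pragma: no cover - illustrative only
    X = np.array([[0.2, -1.5, 3.0]])
    print(binarize(X, threshold=0.5))  # [[0. 0. 1.]]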
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the estimator API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
Parameters
----------
X : array-like
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y='deprecated', copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
            unnecessary copy.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : bool
Copy the input X or not.
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y='deprecated', copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
y : (ignored)
.. deprecated:: 0.19
This parameter will be removed in 0.21.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
if not isinstance(y, string_types) or y != 'deprecated':
warnings.warn("The parameter y on transform() is "
"deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
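# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). For a linear kernel, centering the
# kernel matrix is equivalent to centering the features themselves.
def _demo_kernel_centerer():  # pragma: no cover - illustrative only
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = X.dot(X.T)  # linear kernel on the training data
    K_centered = KernelCenterer().fit(K).transform(K)
    X_centered = X - X.mean(axis=0)
    print(np.allclose(K_centered, X_centered.dot(X_centered.T)))  # True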
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
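# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). _transform_selected applies a transform
# to a subset of columns and stacks the untouched columns to the right.
def _demo_transform_selected():  # pragma: no cover - illustrative only
    X = np.array([[1., 2., 3.], [4., 5., 6.]])
    doubled = _transform_selected(X, lambda Z: 2 * Z, selected=[0, 1])
    print(doubled)  # [[ 2.  4.  3.] [ 8. 10.  6.]]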
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature
        is present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and four samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_, using the mask below.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
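# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). With handle_unknown='ignore', a
# category unseen during fit simply encodes to all-zero columns.
def _demo_one_hot_unknown():  # pragma: no cover - illustrative only
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit([[0, 1], [1, 2]])
    # Category 3 in the second feature was never seen during fit.
    print(enc.transform([[1, 3]]))  # [[0. 1. 0. 0.]]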
class QuantileTransformer(BaseEstimator, TransformerMixin):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
copy : boolean, optional, (default=True)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
    references_ : ndarray, shape (n_quantiles,)
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import QuantileTransformer
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> qt = QuantileTransformer(n_quantiles=10, random_state=0)
>>> qt.fit_transform(X) # doctest: +ELLIPSIS
array([...])
See also
--------
quantile_transform : Equivalent function without the estimator API.
StandardScaler : perform standardization that is faster, but less robust
to outliers.
RobustScaler : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
def __init__(self, n_quantiles=1000, output_distribution='uniform',
ignore_implicit_zeros=False, subsample=int(1e5),
random_state=None, copy=True):
self.n_quantiles = n_quantiles
self.output_distribution = output_distribution
self.ignore_implicit_zeros = ignore_implicit_zeros
self.subsample = subsample
self.random_state = random_state
self.copy = copy
def _dense_fit(self, X, random_state):
"""Compute percentiles for dense matrices.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
"""
if self.ignore_implicit_zeros:
warnings.warn("'ignore_implicit_zeros' takes effect only with"
" sparse matrix. This parameter has no effect.")
n_samples, n_features = X.shape
        # for compatibility issues with numpy <= 1.8.X, references
# need to be a list scaled between 0 and 100
references = (self.references_ * 100).tolist()
self.quantiles_ = []
for col in X.T:
if self.subsample < n_samples:
subsample_idx = random_state.choice(n_samples,
size=self.subsample,
replace=False)
col = col.take(subsample_idx, mode='clip')
self.quantiles_.append(np.percentile(col, references))
self.quantiles_ = np.transpose(self.quantiles_)
def _sparse_fit(self, X, random_state):
"""Compute percentiles for sparse matrices.
Parameters
----------
X : sparse matrix CSC, shape (n_samples, n_features)
The data used to scale along the features axis. The sparse matrix
needs to be nonnegative.
"""
n_samples, n_features = X.shape
        # for compatibility issues with numpy <= 1.8.X, references
# need to be a list scaled between 0 and 100
references = list(map(lambda x: x * 100, self.references_))
self.quantiles_ = []
for feature_idx in range(n_features):
column_nnz_data = X.data[X.indptr[feature_idx]:
X.indptr[feature_idx + 1]]
if len(column_nnz_data) > self.subsample:
column_subsample = (self.subsample * len(column_nnz_data) //
n_samples)
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=column_subsample,
dtype=X.dtype)
else:
column_data = np.zeros(shape=self.subsample, dtype=X.dtype)
column_data[:column_subsample] = random_state.choice(
column_nnz_data, size=column_subsample, replace=False)
else:
if self.ignore_implicit_zeros:
column_data = np.zeros(shape=len(column_nnz_data),
dtype=X.dtype)
else:
column_data = np.zeros(shape=n_samples, dtype=X.dtype)
column_data[:len(column_nnz_data)] = column_nnz_data
if not column_data.size:
# if no nnz, an error will be raised for computing the
# quantiles. Force the quantiles to be zeros.
self.quantiles_.append([0] * len(references))
else:
self.quantiles_.append(
np.percentile(column_data, references))
self.quantiles_ = np.transpose(self.quantiles_)
def fit(self, X, y=None):
"""Compute the quantiles used for transforming.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
self : object
Returns self
"""
if self.n_quantiles <= 0:
raise ValueError("Invalid value for 'n_quantiles': %d. "
"The number of quantiles must be at least one."
% self.n_quantiles)
if self.subsample <= 0:
raise ValueError("Invalid value for 'subsample': %d. "
"The number of subsamples must be at least one."
% self.subsample)
if self.n_quantiles > self.subsample:
raise ValueError("The number of quantiles cannot be greater than"
" the number of samples used. Got {} quantiles"
" and {} samples.".format(self.n_quantiles,
self.subsample))
X = self._check_inputs(X)
rng = check_random_state(self.random_state)
# Create the quantiles of reference
self.references_ = np.linspace(0, 1, self.n_quantiles,
endpoint=True)
if sparse.issparse(X):
self._sparse_fit(X, rng)
else:
self._dense_fit(X, rng)
return self
def _transform_col(self, X_col, quantiles, inverse):
"""Private function to transform a single feature"""
if self.output_distribution == 'normal':
output_distribution = 'norm'
else:
output_distribution = self.output_distribution
output_distribution = getattr(stats, output_distribution)
        # older versions of scipy do not handle a tuple as fill_value;
        # clipping the values before the transform solves the issue
if not inverse:
lower_bound_x = quantiles[0]
upper_bound_x = quantiles[-1]
lower_bound_y = 0
upper_bound_y = 1
else:
lower_bound_x = 0
upper_bound_x = 1
lower_bound_y = quantiles[0]
upper_bound_y = quantiles[-1]
# for inverse transform, match a uniform PDF
X_col = output_distribution.cdf(X_col)
# find index for lower and higher bounds
lower_bounds_idx = (X_col - BOUNDS_THRESHOLD <
lower_bound_x)
upper_bounds_idx = (X_col + BOUNDS_THRESHOLD >
upper_bound_x)
if not inverse:
# Interpolate in one direction and in the other and take the
# mean. This is in case of repeated values in the features
# and hence repeated quantiles
#
            # If we don't do this, only one extreme of the duplicated values
            # is used (the upper when interpolating in ascending order, and
            # the lower for descending). We take the mean of these two.
X_col = .5 * (np.interp(X_col, quantiles, self.references_)
- np.interp(-X_col, -quantiles[::-1],
-self.references_[::-1]))
else:
X_col = np.interp(X_col, self.references_, quantiles)
X_col[upper_bounds_idx] = upper_bound_y
X_col[lower_bounds_idx] = lower_bound_y
# for forward transform, match the output PDF
if not inverse:
X_col = output_distribution.ppf(X_col)
# find the value to clip the data to avoid mapping to
# infinity. Clip such that the inverse transform will be
# consistent
clip_min = output_distribution.ppf(BOUNDS_THRESHOLD -
np.spacing(1))
clip_max = output_distribution.ppf(1 - (BOUNDS_THRESHOLD -
np.spacing(1)))
X_col = np.clip(X_col, clip_min, clip_max)
return X_col
def _check_inputs(self, X, accept_sparse_negative=False):
"""Check inputs before fit and transform"""
X = check_array(X, accept_sparse='csc', copy=self.copy,
dtype=[np.float64, np.float32])
        # we only accept a non-negative sparse matrix when
        # ignore_implicit_zeros is False and we are calling fit or transform.
if (not accept_sparse_negative and not self.ignore_implicit_zeros and
(sparse.issparse(X) and np.any(X.data < 0))):
raise ValueError('QuantileTransformer only accepts non-negative'
' sparse matrices.')
# check the output PDF
if self.output_distribution not in ('normal', 'uniform'):
raise ValueError("'output_distribution' has to be either 'normal'"
" or 'uniform'. Got '{}' instead.".format(
self.output_distribution))
return X
def _check_is_fitted(self, X):
"""Check the inputs before transforming"""
check_is_fitted(self, 'quantiles_')
        # check that the dimensions of X match the fitted data
if X.shape[1] != self.quantiles_.shape[1]:
raise ValueError('X does not have the same number of features as'
' the previously fitted data. Got {} instead of'
' {}.'.format(X.shape[1],
self.quantiles_.shape[1]))
def _transform(self, X, inverse=False):
"""Forward and inverse transform.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
The data used to scale along the features axis.
inverse : bool, optional (default=False)
If False, apply forward transform. If True, apply
inverse transform.
Returns
-------
X : ndarray, shape (n_samples, n_features)
Projected data
"""
if sparse.issparse(X):
for feature_idx in range(X.shape[1]):
column_slice = slice(X.indptr[feature_idx],
X.indptr[feature_idx + 1])
X.data[column_slice] = self._transform_col(
X.data[column_slice], self.quantiles_[:, feature_idx],
inverse)
else:
for feature_idx in range(X.shape[1]):
X[:, feature_idx] = self._transform_col(
X[:, feature_idx], self.quantiles_[:, feature_idx],
inverse)
return X
def transform(self, X):
"""Feature-wise transformation of the data.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X)
self._check_is_fitted(X)
return self._transform(X, inverse=False)
def inverse_transform(self, X):
"""Back-projection to the original space.
Parameters
----------
X : ndarray or sparse matrix, shape (n_samples, n_features)
The data used to scale along the features axis. If a sparse
matrix is provided, it will be converted into a sparse
``csc_matrix``. Additionally, the sparse matrix needs to be
nonnegative if `ignore_implicit_zeros` is False.
Returns
-------
Xt : ndarray or sparse matrix, shape (n_samples, n_features)
The projected data.
"""
X = self._check_inputs(X, accept_sparse_negative=True)
self._check_is_fitted(X)
return self._transform(X, inverse=True)
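# Illustrative usage sketch (not part of the original scikit-learn module; the
# _demo_* helper name is hypothetical). output_distribution='normal' maps a
# skewed feature onto an approximately standard normal one.
def _demo_quantile_normal():  # pragma: no cover - illustrative only
    rng = np.random.RandomState(0)
    X = rng.lognormal(size=(100, 1))  # heavily right-skewed input
    qt = QuantileTransformer(n_quantiles=50, output_distribution='normal',
                             random_state=0)
    Xt = qt.fit_transform(X)
    print(Xt.mean(), Xt.std())  # roughly 0 and 1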
def quantile_transform(X, axis=0, n_quantiles=1000,
output_distribution='uniform',
ignore_implicit_zeros=False,
subsample=int(1e5),
random_state=None,
copy=False):
"""Transform features using quantiles information.
This method transforms the features to follow a uniform or a normal
distribution. Therefore, for a given feature, this transformation tends
to spread out the most frequent values. It also reduces the impact of
(marginal) outliers: this is therefore a robust preprocessing scheme.
The transformation is applied on each feature independently.
    The cumulative distribution function of a feature is used to project the
original values. Features values of new/unseen data that fall below
or above the fitted range will be mapped to the bounds of the output
distribution. Note that this transform is non-linear. It may distort linear
correlations between variables measured at the same scale but renders
variables measured at different scales more directly comparable.
Read more in the :ref:`User Guide <preprocessing_transformer>`.
Parameters
----------
X : array-like, sparse matrix
The data to transform.
axis : int, (default=0)
        Axis used to compute the quantiles along. If 0,
        transform each feature, otherwise (if 1) transform each sample.
n_quantiles : int, optional (default=1000)
Number of quantiles to be computed. It corresponds to the number
        of landmarks used to discretize the cumulative distribution function.
output_distribution : str, optional (default='uniform')
Marginal distribution for the transformed data. The choices are
'uniform' (default) or 'normal'.
ignore_implicit_zeros : bool, optional (default=False)
Only applies to sparse matrices. If True, the sparse entries of the
matrix are discarded to compute the quantile statistics. If False,
these entries are treated as zeros.
subsample : int, optional (default=1e5)
Maximum number of samples used to estimate the quantiles for
computational efficiency. Note that the subsampling procedure may
differ for value-identical sparse and dense matrices.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random. Note that this is used by subsampling and smoothing
noise.
    copy : boolean, optional, (default=False)
Set to False to perform inplace transformation and avoid a copy (if the
input is already a numpy array).
Attributes
----------
quantiles_ : ndarray, shape (n_quantiles, n_features)
        The values corresponding to the quantiles of reference.
    references_ : ndarray, shape (n_quantiles,)
Quantiles of references.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import quantile_transform
>>> rng = np.random.RandomState(0)
>>> X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
>>> quantile_transform(X, n_quantiles=10, random_state=0)
... # doctest: +ELLIPSIS
array([...])
See also
--------
QuantileTransformer : Performs quantile-based scaling using the
``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`).
scale : perform standardization that is faster, but less robust
to outliers.
robust_scale : perform robust standardization that removes the influence
of outliers but does not put outliers and inliers on the same scale.
Notes
-----
For a comparison of the different scalers, transformers, and normalizers,
see :ref:`examples/preprocessing/plot_all_scaling.py
<sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
"""
n = QuantileTransformer(n_quantiles=n_quantiles,
output_distribution=output_distribution,
subsample=subsample,
ignore_implicit_zeros=ignore_implicit_zeros,
random_state=random_state,
copy=copy)
if axis == 0:
return n.fit_transform(X)
elif axis == 1:
return n.fit_transform(X.T).T
else:
raise ValueError("axis should be either equal to 0 or 1. Got"
" axis={}".format(axis))
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/core/base.py | 7 | 38191 | """
Base and utility classes for pandas objects.
"""
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.types.missing import isnull
from pandas.types.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.types.common import is_object_dtype, is_list_like, is_scalar
from pandas.core import common as com
import pandas.core.nanops as nanops
import pandas.lib as lib
from pandas.compat.numpy import function as nv
from pandas.util.decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.common import AbstractMethodError
from pandas.formats.printing import pprint_thing
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _dir_additions(self):
""" add addtional __dir__ for this object """
return set()
def _dir_deletions(self):
""" delete unwanted __dir__ for this object """
return set()
def __dir__(self):
"""
Provide method name lookup and completion
Only provide 'public' methods
"""
rv = set(dir(type(self)))
rv = (rv - self._dir_deletions()) | self._dir_additions()
return sorted(rv)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
        Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
    call to `self._freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
        # dict lookup instead of getattr, as getattr is false for getters
        # that error
if getattr(self, "__frozen", False) and not \
(key in type(self).__dict__ or key == "_cache"):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
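# Illustrative usage sketch (not part of the original pandas module; the
# _demo_* helper and toy class are hypothetical). After _freeze(), assigning
# an unknown attribute raises instead of silently creating it.
def _demo_no_new_attributes():  # pragma: no cover - illustrative only
    class _ToyAccessor(NoNewAttributesMixin):
        def __init__(self):
            self.allowed = 1   # set before freezing, so this is fine
            self._freeze()
    acc = _ToyAccessor()
    try:
        acc.typo = 2           # would usually hide a bug; now it raises
    except AttributeError as err:
        print(err)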
class PandasDelegate(PandasObject):
""" an abstract base class for delegating methods/properties """
def _delegate_property_get(self, name, *args, **kwargs):
raise TypeError("You cannot access the "
"property {name}".format(name=name))
def _delegate_property_set(self, name, value, *args, **kwargs):
raise TypeError("The property {name} cannot be set".format(name=name))
def _delegate_method(self, name, *args, **kwargs):
raise TypeError("You cannot call method {name}".format(name=name))
@classmethod
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
"""
add accessors to cls from the delegate class
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
        accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter,
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
class AccessorProperty(object):
"""Descriptor for implementing accessor properties like Series.str
"""
def __init__(self, accessor_cls, construct_accessor):
self.accessor_cls = accessor_cls
self.construct_accessor = construct_accessor
self.__doc__ = accessor_cls.__doc__
def __get__(self, instance, owner=None):
if instance is None:
# this ensures that Series.str.<method> is well defined
return self.accessor_cls
return self.construct_accessor(instance)
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
    object; sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
}
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
_agg_doc = """Aggregate using input function or dict of {column ->
function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Accepted Combinations are:
- string cythonized function name
- function
- list of functions
- dict of columns -> functions
- nested dict of names -> dicts of functions
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
_see_also_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
"""
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
raise SpecificationError('cannot perform renaming '
'for {0} with a nested '
'dictionary'.format(k))
arg = new_arg
from pandas.tools.merge import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(compat.iterkeys(arg))):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
if isinstance(result, list):
result = concat(result, keys=keys, axis=1)
elif isinstance(list(compat.itervalues(result))[0],
ABCDataFrame):
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
from pandas import DataFrame
result = DataFrame(result)
return result, True
elif hasattr(arg, '__iter__'):
return self._aggregate_multiple_funcs(arg, _level=_level), None
else:
result = None
cy_func = self._is_cython_func(arg)
if cy_func and not args and not kwargs:
return getattr(self, cy_func)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com._get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for col in obj:
try:
colg = self._gotitem(col, ndim=1, subset=obj[col])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
return concat(results, keys=keys, axis=1)
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
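# --- Editorial illustration (added; not part of the original source) --------
# The tables above let SelectionMixin map Python builtins and common NumPy
# callables onto the names of its cython-backed implementations, which is why
# e.g. ``df.groupby(...).agg(np.sum)`` takes the fast 'sum' path.  A minimal
# check of that mapping with a toy subclass (illustration only):
def _demo_selection_mixin_tables():
    class _Demo(SelectionMixin):
        pass

    d = _Demo()
    assert d._is_cython_func(np.sum) == 'sum'
    assert d._is_builtin_func(builtins.max) is np.max
    assert d._is_cython_func(len) is None    # unknown callables fall through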
class GroupByMixin(object):
""" provide the groupby facilities to the mixed object """
@staticmethod
def _dispatch(name, *args, **kwargs):
""" dispatch to apply """
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = dict([(attr, getattr(self, attr))
for attr in self._attributes])
self = self.__class__(subset,
groupby=self._groupby[key],
parent=self,
**kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
class FrozenList(PandasObject, list):
"""
Container that doesn't allow setting items, *but*, unlike a plain list
(which is technically non-hashable), it is hashable and can therefore be
used for lookups, etc.
"""
# Sidenote: This has to be of type list, otherwise it messes up PyTables
# typechecks
def __add__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(super(FrozenList, self).__add__(other))
__iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super(FrozenList, self).__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super(FrozenList, self).__getitem__(n))
return super(FrozenList, self).__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super(FrozenList, self).__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super(FrozenList, self).__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__.__name__)
def __unicode__(self):
return pprint_thing(self, quote_strings=True,
escape_chars=('\t', '\r', '\n'))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
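# --- Editorial illustration (added; not part of the original source) --------
# FrozenList behaves like an immutable, hashable list: concatenation and
# slicing return new FrozenLists, it hashes like a tuple, and any mutating
# method raises TypeError.
def _demo_frozen_list():
    fl = FrozenList([1, 2, 3])
    assert isinstance(fl + (4, 5), FrozenList)   # tuples are coerced
    assert hash(fl) == hash((1, 2, 3))
    try:
        fl.append(6)
    except TypeError:
        pass                                     # mutation is disabled
    else:
        raise AssertionError("append should have raised TypeError")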
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
class IndexOpsMixin(object):
""" common ops mixin to support a unified inteface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
return self._values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
return self._values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
return self.values.base
@property
def _values(self):
""" the internal implementation """
return self.values
def max(self):
""" The maximum value of the object """
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
return a ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
""" The minimum value of the object """
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
return a ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isnull(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Returns object containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
_shared_docs['unique'] = (
"""
Return %(unique)s of unique values in the object.
Significantly faster than numpy.unique. Includes NA values.
The order of the original is preserved.
Returns
-------
uniques : %(unique)s
""")
@Appender(_shared_docs['unique'] % _indexops_doc_kwargs)
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.nanops import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isnull(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self):
v += lib.memory_usage_of_objects(self.values)
return v
def factorize(self, sort=False, na_sentinel=-1):
"""
Encode the object as an enumerated type or categorical variable
Parameters
----------
sort : boolean, default False
Sort by values
na_sentinel : int, default -1
Value to mark "not found"
Returns
-------
labels : the indexer to the original array
uniques : the unique Index
"""
from pandas.core.algorithms import factorize
return factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `self` would be preserved.
Parameters
----------
%(value)s : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>>
>>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk' ])
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'])
array([1])
>>> x.searchsorted(['bread', 'eggs'])
array([1, 4])
>>> x.searchsorted(['bread', 'eggs'], side='right')
array([3, 4]) # eggs before milk
""")
@Substitution(klass='IndexOpsMixin', value='key')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, key, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(key, side=side, sorter=sorter)
_shared_docs['drop_duplicates'] = (
"""Return %(klass)s with duplicate values removed
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
take_last : deprecated
%(inplace)s
Returns
-------
deduplicated : %(klass)s
""")
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(_shared_docs['drop_duplicates'] % _indexops_doc_kwargs)
def drop_duplicates(self, keep='first', inplace=False):
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
_shared_docs['duplicated'] = (
"""Return boolean %(duplicated)s denoting duplicate values
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
take_last : deprecated
Returns
-------
duplicated : %(duplicated)s
""")
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(_shared_docs['duplicated'] % _indexops_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
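# --- Editorial illustration (added; not part of the original source) --------
# IndexOpsMixin is mixed into both Series and Index, so the methods above back
# calls such as the following (pandas imported lazily, mirroring the file's
# own style; the data is invented):
def _demo_index_ops():
    import pandas as pd

    s = pd.Series([2, 1, 2, np.nan])
    assert s.nunique() == 2                   # NaN excluded by default
    assert not s.is_unique
    counts = s.value_counts(dropna=False)     # NaN gets its own bucket
    assert counts.iloc[0] == 2                # most frequent value first
    assert len(s.drop_duplicates(keep='last')) == 3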
| gpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/linear_model/randomized_l1.py | 27 | 25868 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import warnings
import itertools
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..feature_selection.base import SelectorMixin
from ..utils import (as_float_array, check_random_state, check_X_y, safe_mask,
deprecated)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
@deprecated("The class BaseRandomizedLinearModel is deprecated in 0.19"
" and will be removed in 0.21.")
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
SelectorMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values. Will be cast to X's dtype if necessary
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if memory is None:
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif not isinstance(memory, Memory):
raise ValueError("'memory' should either be a string or"
" a sklearn.externals.joblib.Memory"
" instance, got 'memory={!r}' instead.".format(
type(memory)))
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def _get_support_mask(self):
"""Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected
for retention.
"""
check_is_fitted(self, 'scores_')
return self.scores_ > self.selection_threshold
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
@deprecated("The class RandomizedLasso is deprecated in 0.19"
" and will be removed in 0.21.")
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which corresponds to ``scaling`` here.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features.
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up calculations.
If set to 'auto' let us decide.
The Gram matrix can also be passed as argument, but it will be used
only for the selection of parameter alpha, if alpha is 'aic' or 'bic'.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : None, str or object with the joblib.Memory interface, optional \
(default=None)
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=None):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
precompute = self.precompute
# A precomputed Gram array is useless, since _randomized_lasso
# changes X at each iteration
if hasattr(precompute, '__array__'):
precompute = 'auto'
assert precompute in (True, False, None, 'auto')
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=precompute)
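# --- Editorial illustration (added; not part of the original source) --------
# Minimal stability-selection sketch with RandomizedLasso (deprecated, kept
# here purely for documentation).  The data and settings below are invented.
def _demo_randomized_lasso():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] + 3 * X[:, 1] + 0.1 * rng.randn(60)
    rlasso = RandomizedLasso(alpha=0.025, n_resampling=100,
                             random_state=0).fit(X, y)
    # per-feature selection frequencies and the resulting support mask
    return rlasso.scores_, rlasso.get_support()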
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
if C.ndim > 1:
raise ValueError("C should be 1-dimensional array-like, "
"but got a {}-dimensional array-like instead: {}."
.format(C.ndim, C))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
@deprecated("The class RandomizedLogisticRegression is deprecated in 0.19"
" and will be removed in 0.21.")
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Parameters
----------
C : float or array-like of shape [n_reg_parameter], optional, default=1
The regularization parameter C in the LogisticRegression.
When C is an array, fit will take each regularization parameter in C
one by one for LogisticRegression and store results for each one
in ``all_scores_``, where columns and rows represent corresponding
reg_parameters and features.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
features.
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : None, str or object with the joblib.Memory interface, optional \
(default=None)
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=None):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
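# --- Editorial illustration (added; not part of the original source) --------
# The classification counterpart follows the same pattern (also deprecated);
# the data below is invented.
def _demo_randomized_logistic():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    rlog = RandomizedLogisticRegression(C=1.0, n_resampling=50,
                                        random_state=0).fit(X, y)
    return rlog.scores_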
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
@deprecated("The function lasso_stability_path is deprecated in 0.19"
" and will be removed in 0.21.")
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : int, RandomState instance or None, optional, default=None
The generator used to randomize the design. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
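# --- Editorial illustration (added; not part of the original source) --------
# lasso_stability_path returns the grid of alpha / alpha_max values together
# with, for every feature, its selection frequency along that grid.  Invented
# data:
def _demo_lasso_stability_path():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] + 0.1 * rng.randn(60)
    alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                    n_resampling=50)
    # scores_path[i, j]: fraction of resamples selecting feature i at
    # alpha/alpha_max == alphas_grid[j]
    return alphas_grid, scores_path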
| mit |
rajat1994/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
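# --- Editorial illustration (added; not part of the original test module) ---
# The estimator exercised above, in a minimal end-to-end form (invented data):
def _demo_factor_analysis():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    fa = FactorAnalysis(n_components=2).fit(X)
    return fa.transform(X).shape         # -> (30, 2)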
| bsd-3-clause |
farr/arfit | arfit/cp_utils.py | 1 | 2306 | import carmcmc as cm
from gatspy.periodic import LombScargleFast
import matplotlib.pyplot as plt
import numpy as np
def csample_from_files(datafile, chainfile, p, q):
data = np.loadtxt(datafile)
times, tind = np.unique(data[:,0], return_index=True)
data = data[tind, :]
chain = np.loadtxt(chainfile)
assert chain.shape[1] == p + q + 5, 'dimension mismatch'
return cm.CarmaSample(data[:,0], data[:,1], data[:,2], None, q=q, trace=chain[:,:-2], loglike=chain[:,-2], logpost=chain[:,-1])
def normalised_lombscargle(ts, ys, dys, oversampling=5, nyquist_factor=3):
model = LombScargleFast().fit(ts, ys, dys)
pers, pows = model.periodogram_auto(oversampling=oversampling, nyquist_factor=nyquist_factor)
fs = 1.0/pers
T = np.max(ts) - np.min(ts)
mu = 1/T*np.trapz(ys, ts)
s2 = 1/T*np.trapz(np.square(ys-mu), ts)
return fs, s2*pows/np.trapz(pows, fs)
def plot_psd_sample_data(sample, oversampling=5, nyquist_factor=3):
psd_low, psd_high, psd_med, fs = sample.plot_power_spectrum(doShow=False)
plt.clf()
plt.loglog(fs, psd_med, '-b', alpha=0.33)
plt.fill_between(fs, psd_low, psd_high, color='b', alpha=0.17)
fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
bw = fs[-1] - fs[0]
T = sample.time[-1] - sample.time[0]
s2 = 1/T*np.trapz(np.square(sample.ysig), sample.time)
noise_level = s2/bw
levels = noise_level*np.sqrt(sample.get_samples('measerr_scale'))
plt.axhline(np.median(levels), color='g', alpha=0.33)
plt.fill_between(fs, np.percentile(levels, 84)+0*fs, np.percentile(levels, 16)+0*fs, color='g', alpha=0.17)
plt.loglog(fs, psd, '-r', alpha=0.33)
def plot_psd_sample_draw(sample, loc='upper left', oversampling=5, nyquist_factor=3):
fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
ys_draw = sample.predict(sample.time, bestfit='random')[0]
fs, dpsd = normalised_lombscargle(sample.time, ys_draw, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
plt.loglog(fs, psd, '-k', label='Data', alpha=0.5)
plt.loglog(fs, dpsd, '-b', label='Prediction', alpha=0.5)
plt.legend(loc=loc)
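# --- Editorial illustration (added; not part of the original module) --------
# Typical workflow sketch; the file names and CARMA order (p, q) below are
# placeholders, not taken from the original code.
def _demo_workflow(datafile='lightcurve.txt', chainfile='carma_chain.txt'):
    sample = csample_from_files(datafile, chainfile, p=3, q=1)
    plot_psd_sample_data(sample)    # posterior PSD band vs. Lomb-Scargle PSD
    plot_psd_sample_draw(sample)    # data PSD vs. a predicted draw
    plt.show()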
| mit |
plang85/rough_surfaces | setup.py | 1 | 1052 | from setuptools import setup
setup(name='rough_surfaces',
version='0.1',
description='Analysis, contact and flow - fractures and rough surfaces',
author='Philipp S. Lang',
author_email='[email protected]',
download_url='https://github.com/plang85/rough_surfaces.git',
install_requires=['numpy>=1.9.1',
'scipy>=0.14',
'matplotlib'], # hate this here TODO somehow get rid of plotting stuff
extras_require={
'test': ['pytest>=3.6.0',
'pytest-pep8',
'pytest-xdist',
'pytest-cov',
'codecov'],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Engineers',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['rough_surfaces']) | mit |
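# Editorial note (added; not part of the original setup.py): with the metadata
# above, a development install is typically `pip install -e .`, or
# `pip install -e ".[test]"` to also pull in the optional test dependencies
# declared under extras_require.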
hitszxp/scikit-learn | sklearn/decomposition/pca.py | 14 | 22688 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape (n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
n_features: int,
embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
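# --- Editorial illustration (added; not part of the original source) --------
# With an eigen-spectrum that has a clear elbow, the Minka MLE criterion above
# should recover the underlying rank (spectrum values below are invented):
def _demo_infer_dimension():
    spectrum = np.array([10., 8., 6., 0.3, 0.2, 0.1])
    return _infer_dimension_(spectrum, n_samples=100, n_features=6)  # expect ~3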
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making there data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components. \
If n_components is not set then all components are stored and the sum \
of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulative explained variance
# percentage exceeds the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
"""
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
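# A minimal round-trip sketch of the class above (not part of the scikit-learn
# API); it assumes the module-level imports used by PCA (numpy, linalg,
# fast_dot) are available, as they are in the original file.
def _pca_roundtrip_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    pca = PCA(n_components=2, whiten=True)
    X_reduced = pca.fit(X).transform(X)
    # Keeping 2 of 3 components makes the reconstruction approximate; the
    # residual corresponds to the discarded (smallest) explained variance.
    X_back = pca.inverse_transform(X_reduced)
    return ((X - X_back) ** 2).mean()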
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
        X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
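# A short sketch of the estimator above; with every component kept and
# whiten=False, transform followed by inverse_transform recovers the input up
# to numerical error. It assumes the module's helpers (randomized_svd,
# fast_dot, check_array) are importable as in the original file.
def _randomized_pca_sketch():
    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.randn(50, 4)
    rpca = RandomizedPCA(n_components=4, random_state=0).fit(X)
    X_back = rpca.inverse_transform(rpca.transform(X))
    return np.abs(X - X_back).max()  # expected to be close to machine epsilon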
| bsd-3-clause |
thp44/delphin_6_automation | delphin_6_automation/sampling/sim_time_prediction.py | 1 | 10260 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import copy
import pandas as pd
import numpy as np
import typing
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsRegressor
import pickle
from bson import Binary
from bson.objectid import ObjectId
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions.db_templates import sample_entry
from delphin_6_automation.database_interactions.db_templates import time_model_entry
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_time_prediction_data() -> pd.DataFrame:
"""
Collects the sample data of the simulated Delphin projects in the database in order to
predict the simulation time
"""
logger.debug('Retrieving time prediction data from Delphin projects')
entries = delphin_entry.Delphin.objects(simulation_time__exists=True).only('sample_data', 'simulation_time')
col = ['time', ] + list(entries[0].sample_data.keys()) + list(entries[0].sample_data['design_option'].keys())
frames = []
for i in range(entries.count()):
entry = entries[i]
data = copy.deepcopy(entry.sample_data)
data.update(entry.sample_data['design_option'])
data['time'] = entry.simulation_time
frames.append(pd.DataFrame(columns=col, data=data, index=[i, ]))
data_frame = pd.concat(frames)
data_frame = data_frame.loc[:, data_frame.columns != 'design_option']
data_frame = data_frame.loc[:, data_frame.columns != 'sequence']
return data_frame
def process_time_data(data_frame: pd.DataFrame) -> typing.Tuple[pd.DataFrame, pd.Series]:
"""Processing of sample data for the machine learning model"""
y_data = data_frame['time']
x_data = data_frame.loc[:, data_frame.columns != 'time']
x_data = x_data.fillna(0.0)
x_data = transform_weather(x_data)
x_data = transform_interior_climate(x_data)
x_data = transform_system_names(x_data)
return x_data, y_data
def transform_interior_climate(data: pd.DataFrame) -> pd.DataFrame:
"""Transforms the interior climate classes into numerical data."""
if 'interior_climate' in data.columns:
if not (data.loc[data.loc[:, 'interior_climate'] == 'a', 'interior_climate']).empty:
data.loc[data.loc[:, 'interior_climate'] == 'a', 'interior_climate'] = 0.0
try:
data.loc[data.loc[:, 'interior_climate'] == 'b', 'interior_climate'] = 1.0
except TypeError:
pass
else:
try:
data.loc[data.loc[:, 'interior_climate'] == 'measured data', 'interior_climate'] = 0.0
except TypeError:
pass
return data
def transform_weather(data: pd.DataFrame) -> pd.DataFrame:
"""Transform the weather stations names into numerical data."""
try:
sys_names = set(data.loc[:, 'exterior_climate'])
except KeyError:
return data
else:
mapper = {}
for i, name in enumerate(sys_names):
mapper[name] = i
data.loc[:, 'exterior_climate'] = data.loc[:, 'exterior_climate'].map(mapper)
return data
def transform_system_names(data: pd.DataFrame) -> pd.DataFrame:
"""Transforms the insulation system names into numerical data"""
try:
sys_names = set(data.loc[data.loc[:, 'system_name'] != 0, 'system_name'])
except KeyError:
return data
else:
mapper = {0: 0}
for i, name in enumerate(sys_names, 1):
mapper[name] = i
data.loc[:, 'system_name'] = data.loc[:, 'system_name'].map(mapper)
return data
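# Tiny illustration of the two categorical transforms above on hypothetical
# data (the system and station names below are made up, not RiBuild values):
# string labels are mapped in place onto small integer codes.
def _transform_example() -> pd.DataFrame:
    frame = pd.DataFrame({'system_name': ['SystemA', 'SystemB', 0],
                          'exterior_climate': ['StationX', 'StationX', 'StationY']})
    frame = transform_weather(frame)
    frame = transform_system_names(frame)
    return frame  # both columns now contain integers only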
def compute_model(x_data: pd.DataFrame, y_data: pd.Series) -> typing.Optional[typing.Tuple[KNeighborsRegressor, dict]]:
"""Generation of the machine learning model"""
logger.debug('Computing ML time prediction model')
ss = ShuffleSplit(n_splits=5, test_size=0.25, random_state=47)
scaler = MinMaxScaler()
best_model = {'score': 0, 'parameters': [1, 'uniform']}
for nn in [3, 5, 7, 9]:
for weight in ['uniform', 'distance']:
knn_reg = KNeighborsRegressor(n_neighbors=nn, weights=weight)
scores = cross_val_score(knn_reg, scaler.fit_transform(x_data), y_data, cv=ss)
scores, features = remove_bad_features(x_data, y_data, scores, knn_reg, scaler, ss)
if scores.mean() > best_model['score']:
best_model['score'] = scores.mean()
best_model['parameters'] = [nn, weight]
best_model['features'] = features
logger.debug(f'Update best model to: {best_model["parameters"]} with score: {best_model["score"]}')
if best_model['score'] <= 0.6:
logger.info(f'No time prediction model with a R2 score above 0.6 could be found. Best model was: {best_model}')
return None, None
else:
logger.info(f'KNN with {best_model["parameters"][0]} neighbors and {best_model["parameters"][1]} weight is the '
f'best model with R2 of {best_model["score"]:.5f}')
model = KNeighborsRegressor(n_neighbors=best_model['parameters'][0],
weights=best_model['parameters'][1]).fit(x_data.loc[:, best_model['features']], y_data)
return model, best_model
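# Standalone sketch of the model selection idea in compute_model, run on
# synthetic data instead of database records: scale the features, score a
# small grid of KNN regressors with shuffle-split cross validation and keep
# the configuration with the best mean R2. It only reuses the imports already
# present at the top of this module.
def _knn_selection_sketch() -> dict:
    rng = np.random.RandomState(0)
    x_data = rng.rand(200, 3)
    y_data = 60.0 * x_data[:, 0] + 10.0 * x_data[:, 1] + rng.rand(200)
    splitter = ShuffleSplit(n_splits=5, test_size=0.25, random_state=47)
    scaler = MinMaxScaler()
    best = {'score': -np.inf, 'parameters': None}
    for nn in [3, 5, 7, 9]:
        for weight in ['uniform', 'distance']:
            reg = KNeighborsRegressor(n_neighbors=nn, weights=weight)
            score = cross_val_score(reg, scaler.fit_transform(x_data), y_data,
                                    cv=splitter, scoring='r2').mean()
            if score > best['score']:
                best = {'score': score, 'parameters': (nn, weight)}
    return best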
def remove_bad_features(x_data, y_data, basis_score, knn, scaler, shufflesplit) -> typing.Tuple[np.ndarray, list]:
"""Removes the features in the input data, which does not contribute to a good score."""
features = x_data.columns
col_del = []
feature_scores = []
for feat in features:
feature_less_data = x_data.loc[:, x_data.columns != feat]
test_scores = cross_val_score(knn, scaler.fit_transform(feature_less_data), y_data,
cv=shufflesplit, scoring='r2')
feature_scores.append((feat, test_scores.mean()))
if test_scores.mean() > basis_score.mean():
col_del.append(feat)
logger.debug(f'Columns to delete: {col_del}')
clean_col = x_data.columns[[c not in col_del
for c in x_data.columns.tolist()]]
cleaned_data = x_data.loc[:, clean_col]
clean_scores = cross_val_score(knn, scaler.fit_transform(cleaned_data), y_data, cv=shufflesplit, scoring='r2')
return clean_scores, list(clean_col)
def upload_model(model: KNeighborsRegressor, model_data: dict, sample_strategy: sample_entry.Strategy) -> ObjectId:
"""Uploads the machine learning model to the database."""
time_model_doc = sample_strategy.time_prediction_model
pickled_model = pickle.dumps(model)
if time_model_doc:
time_model_doc.update(set__model=Binary(pickled_model))
time_model_doc.update(set__test_score=model_data['score'])
time_model_doc.update(set__model_parameters=model_data['parameters'])
time_model_doc.update(set__model_features=model_data['features'])
else:
time_model_doc = time_model_entry.TimeModel()
time_model_doc.model = Binary(pickled_model)
time_model_doc.test_score = model_data['score']
time_model_doc.model_parameters = model_data['parameters']
time_model_doc.model_features = model_data['features']
time_model_doc.sample_strategy = sample_strategy
time_model_doc.save()
sample_strategy.update(set__time_prediction_model=time_model_doc)
logger.info(f'Updated time prediction model with ID {time_model_doc.id} for Sample Strategy with ID '
f'{sample_strategy.id}')
return time_model_doc.id
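# Sketch of the serialisation round trip used by upload_model, without any
# database involved: a fitted estimator survives pickle.dumps/pickle.loads and
# the pickled bytes can be wrapped in bson.Binary for storage exactly as above.
def _pickle_roundtrip_sketch(model: KNeighborsRegressor) -> KNeighborsRegressor:
    payload = Binary(pickle.dumps(model))
    return pickle.loads(payload)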
def create_upload_time_prediction_model(strategy: sample_entry.Strategy) -> typing.Optional[ObjectId]:
"""Collects data, generates and uploads a simulation time prediction model to the database."""
simulation_data = get_time_prediction_data()
x_data, y_data = process_time_data(simulation_data)
model, model_data = compute_model(x_data, y_data)
if model and model_data:
model_id = upload_model(model, model_data, strategy)
return model_id
else:
logger.info(f'No time prediction model was added to Sample Strategy with ID {strategy.id}')
return None
def process_inputs(raw_inputs: dict, model_features: dict) -> pd.DataFrame:
"""Process the sample inputs so a simulation time estimate can be made."""
logger.debug('Processing ML inputs')
data = {'time': None}
raw_inputs.update(raw_inputs['design_option'])
del raw_inputs['design_option']
for key in raw_inputs.keys():
if key in model_features:
data[key] = [raw_inputs[key], ]
df = pd.DataFrame.from_dict(data)
return process_time_data(df)[0]
def simulation_time_prediction_ml(delphin_doc: delphin_entry.Delphin, model_entry: time_model_entry.TimeModel) -> int:
"""Predict the simulation time of a Delphin project."""
time_model = pickle.loads(model_entry.model)
inputs = process_inputs(delphin_doc.sample_data, model_entry.model_features)
sim_time_secs = time_model.predict(inputs)
sim_time_mins = max(int(sim_time_secs / 60), 5)
delphin_doc.update(set__estimated_simulation_time=sim_time_mins)
logger.debug(f'Updated expected_simulation_time to: {sim_time_mins}min for project with ID: {delphin_doc.id}')
return sim_time_mins
def queue_priorities_on_time_prediction(sample_doc: sample_entry.Sample):
"""Update the queue priorities based on the simulation time predictions."""
logger.info(f'Updating queue based on time for projects in sample with ID: {sample_doc.id}')
max_time = np.array([doc.estimated_simulation_time
for doc in sample_doc.delphin_docs]).max()
[doc.update(set__queue_priority=doc.estimated_simulation_time/max_time)
for doc in sample_doc.delphin_docs]
| mit |
joshua-cogliati-inl/moose | modules/tensor_mechanics/tests/drucker_prager/small_deform3.py | 23 | 3585 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(scheme, sqrtj2):
cohesion = 10
friction_degrees = 35
tip_smoother = 8
friction = friction_degrees * np.pi / 180.0
if (scheme == "native"):
aaa = cohesion
bbb = np.tan(friction)
elif (scheme == "outer_tip"):
aaa = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 - np.sin(friction))
bbb = 2 * np.sin(friction) / np.sqrt(3) / (3.0 - np.sin(friction))
elif (scheme == "inner_tip"):
aaa = 2 * np.sqrt(3) * cohesion * np.cos(friction) / (3.0 + np.sin(friction))
bbb = 2 * np.sin(friction) / np.sqrt(3) / (3.0 + np.sin(friction))
elif (scheme == "lode_zero"):
aaa = cohesion * np.cos(friction)
bbb = np.sin(friction) / 3.0
elif (scheme == "inner_edge"):
aaa = 3 * cohesion * np.cos(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
bbb = np.sin(friction) / np.sqrt(9.0 + 3.0 * np.power(np.sin(friction), 2))
return (aaa - np.sqrt(tip_smoother * tip_smoother + sqrtj2 * sqrtj2)) / bbb
def sigma_mean(stress):
return (stress[0] + stress[3] + stress[5])/3.0
def sigma_bar(stress):
mean = sigma_mean(stress)
return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2)))
def third_inv(stress):
mean = sigma_mean(stress)
return (stress[0] - mean)*(stress[3] - mean)*(stress[5] - mean)
def lode_angle(stress):
bar = sigma_bar(stress)
third = third_inv(stress)
return np.arcsin(-1.5 * np.sqrt(3.0) * third / np.power(bar, 3)) / 3.0
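# Quick sanity check of the invariant helpers above for a purely hydrostatic
# stress state (component ordering assumed here: xx, xy, xz, yy, yz, zz): the
# deviatoric magnitude sigma_bar vanishes and the mean equals the pressure.
def check_hydrostatic(p=-5.0):
    hydrostatic = [p, 0.0, 0.0, p, 0.0, p]
    assert abs(sigma_mean(hydrostatic) - p) < 1e-12
    assert sigma_bar(hydrostatic) < 1e-12
    return sigma_mean(hydrostatic), sigma_bar(hydrostatic)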
def moose_result(fn):
f = open(fn)
x = []
y = []
for line in f:
if not line.strip():
continue
line = line.strip()
if line.startswith("time") or line.startswith("0"):
continue
line = map(float, line.split(","))
if line[1] < -1E-10:
continue # this is an elastic deformation
trace = 3.0 * sigma_mean(line[3:])
bar = sigma_bar(line[3:])
x.append(trace)
y.append(bar)
f.close()
return (x, y)
plt.figure()
sqrtj2 = np.arange(0, 30, 0.25)
plt.plot(expected("native", sqrtj2), sqrtj2, 'k-', label = 'expected (native)')
mr = moose_result("gold/small_deform3_native.csv")
plt.plot(mr[0], mr[1], 'k^', label = 'MOOSE (native)')
plt.plot(expected("outer_tip", sqrtj2), sqrtj2, 'g-', label = 'expected (outer_tip)')
mr = moose_result("gold/small_deform3_outer_tip.csv")
plt.plot(mr[0], mr[1], 'g^', label = 'MOOSE (outer_tip)')
plt.plot(expected("inner_tip", sqrtj2), sqrtj2, 'b-', label = 'expected (inner_tip)')
mr = moose_result("gold/small_deform3_inner_tip.csv")
plt.plot(mr[0], mr[1], 'b^', label = 'MOOSE (inner_tip)')
plt.plot(expected("lode_zero", sqrtj2), sqrtj2, 'c-', label = 'expected (lode_zero)')
mr = moose_result("gold/small_deform3_lode_zero.csv")
plt.plot(mr[0], mr[1], 'c^', label = 'MOOSE (lode_zero)')
plt.plot(expected("inner_edge", sqrtj2), sqrtj2, 'r-', label = 'expected (inner_edge)')
mr = moose_result("gold/small_deform3_inner_edge.csv")
plt.plot(mr[0], mr[1], 'r^', label = 'MOOSE (inner_edge)')
legend = plt.legend(bbox_to_anchor=(1.16, 0.95))
for label in legend.get_texts():
label.set_fontsize('small')
plt.xlabel("Tr(stress)")
plt.ylabel("sqrt(J2)")
plt.title("Drucker-Prager yield function on meridional plane")
plt.axis([-25, 15, 0, 25])
plt.savefig("small_deform3.png")
sys.exit(0)
| lgpl-2.1 |
Vimos/scikit-learn | sklearn/svm/setup.py | 83 | 3160 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.pyx']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
# liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.pyx',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
# end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.pyx']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in the legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) for each box, calculate the
    total width and the x-offset position of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, ydescent) for each box, align the boxes
    with *align* and calculate the y-offset of each box.
    *hd_list* : list of (height, ydescent) of boxes to be aligned.
*height* : Intended total length. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. The child artist are meant
to be drawn at a relative position to its parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
Return with, height, xdescent, ydescent of box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extents of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of its children at drawing time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        update the offsets of the children and return the extents of the box
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2*self.pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return with, height, xdescent, ydescent of box
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0,0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
                              adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
Set multilinebaseline .
If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
singleline text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
        Accept : tuple of x,y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
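# A brief usage sketch of the containers defined above: pack two TextArea
# children horizontally and give the packer an explicit display-space offset.
# The renderer-dependent layout only happens at draw time, so constructing the
# boxes does not require a figure.
def _offsetbox_usage_sketch():
    box = HPacker(children=[TextArea("label A"), TextArea("label B")],
                  align="baseline", pad=2, sep=4)
    box.set_offset((10, 10))  # offset of the whole packer, in display units
    return box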
| agpl-3.0 |
pauldeng/nilmtk | nilmtk/metergroup.py | 2 | 70088 | from __future__ import print_function, division
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from datetime import timedelta
from warnings import warn
from sys import stdout
from collections import Counter
from copy import copy, deepcopy
import gc
from collections import namedtuple
# NILMTK imports
from .elecmeter import ElecMeter, ElecMeterID
from .appliance import Appliance
from .datastore.datastore import join_key
from .utils import (tree_root, nodes_adjacent_to_root, simplest_type_for,
flatten_2d_list, convert_to_timestamp, normalise_timestamp,
print_on_line, convert_to_list, append_or_extend_list,
most_common, capitalise_first_letter)
from .plots import plot_series
from .measurement import (select_best_ac_type, AC_TYPES, LEVEL_NAMES,
PHYSICAL_QUANTITIES_TO_AVERAGE)
from nilmtk.exceptions import MeasurementError
from .electric import Electric
from .timeframe import TimeFrame, split_timeframes
from .preprocessing import Apply
from .datastore import MAX_MEM_ALLOWANCE_IN_BYTES
from nilmtk.timeframegroup import TimeFrameGroup
# MeterGroupID.meters is a tuple of ElecMeterIDs. Order doesn't matter.
# (we can't use a set because sets aren't hashable so we can't use
# a set as a dict key or a DataFrame column name.)
MeterGroupID = namedtuple('MeterGroupID', ['meters'])
class MeterGroup(Electric):
"""A group of ElecMeter objects. Can contain nested MeterGroup objects.
Implements many of the same methods as ElecMeter.
Attributes
----------
meters : list of ElecMeters or nested MeterGroups
disabled_meters : list of ElecMeters or nested MeterGroups
name : only set by functions like 'groupby' and 'select_top_k'
"""
def __init__(self, meters=None, disabled_meters=None):
self.meters = convert_to_list(meters)
self.disabled_meters = convert_to_list(disabled_meters)
self.name = ""
def import_metadata(self, store, elec_meters, appliances, building_id):
"""
Parameters
----------
store : nilmtk.DataStore
elec_meters : dict of dicts
metadata for each ElecMeter
appliances : list of dicts
metadata for each Appliance
building_id : BuildingID
"""
# Sanity checking
assert isinstance(elec_meters, dict)
assert isinstance(appliances, list)
assert isinstance(building_id, tuple)
if not elec_meters:
warn("Building {} has an empty 'elec_meters' object."
.format(building_id.instance), RuntimeWarning)
if not appliances:
warn("Building {} has an empty 'appliances' list."
.format(building_id.instance), RuntimeWarning)
# Load static Meter Devices
ElecMeter.load_meter_devices(store)
# Load each meter
for meter_i, meter_metadata_dict in elec_meters.iteritems():
meter_id = ElecMeterID(instance=meter_i,
building=building_id.instance,
dataset=building_id.dataset)
meter = ElecMeter(store, meter_metadata_dict, meter_id)
self.meters.append(meter)
# Load each appliance
for appliance_md in appliances:
appliance_md['dataset'] = building_id.dataset
appliance_md['building'] = building_id.instance
appliance = Appliance(appliance_md)
meter_ids = [ElecMeterID(instance=meter_instance,
building=building_id.instance,
dataset=building_id.dataset)
for meter_instance in appliance.metadata['meters']]
if appliance.n_meters == 1:
# Attach this appliance to just a single meter
meter = self[meter_ids[0]]
if isinstance(meter, MeterGroup): # MeterGroup of site_meters
metergroup = meter
for meter in metergroup.meters:
meter.appliances.append(appliance)
else:
meter.appliances.append(appliance)
else:
# DualSupply or 3-phase appliance so need a meter group
metergroup = MeterGroup()
metergroup.meters = [self[meter_id] for meter_id in meter_ids]
for meter in metergroup.meters:
# We assume that any meters used for measuring
# dual-supply or 3-phase appliances are not also used
# for measuring single-supply appliances.
self.meters.remove(meter)
meter.appliances.append(appliance)
self.meters.append(metergroup)
# disable disabled meters
meters_to_disable = [m for m in self.meters
if isinstance(m, ElecMeter)
and m.metadata.get('disabled')]
for meter in meters_to_disable:
self.meters.remove(meter)
self.disabled_meters.append(meter)
def union(self, other):
"""
Returns
-------
new MeterGroup where its set of `meters` is the union of
`self.meters` and `other.meters`.
"""
if not isinstance(other, MeterGroup):
raise TypeError()
return MeterGroup(set(self.meters).union(other.meters))
def dominant_appliance(self):
dominant_appliances = [meter.dominant_appliance()
for meter in self.meters]
dominant_appliances = list(set(dominant_appliances))
n_dominant_appliances = len(dominant_appliances)
if n_dominant_appliances == 0:
return
elif n_dominant_appliances == 1:
return dominant_appliances[0]
else:
raise RuntimeError(
"More than one dominant appliance in MeterGroup!"
" (The dominant appliance per meter should be manually"
" specified in the metadata. If it isn't and if there are"
" multiple appliances for a meter then NILMTK assumes"
" all appliances on that meter are dominant. NILMTK"
" can't automatically distinguish between multiple"
" appliances on the same meter (at least,"
" not without using NILM!))")
def nested_metergroups(self):
return [m for m in self.meters if isinstance(m, MeterGroup)]
def __getitem__(self, key):
"""Get a single meter using appliance type and instance unless
ElecMeterID is supplied.
These formats for `key` are accepted:
Retrieve a meter using details of the meter:
        * `1` - retrieves meter instance 1, raises Exception if there is
more than one meter with this instance, raises KeyError
if none are found. If meter instance 1 is in a nested MeterGroup
then retrieve the ElecMeter, not the MeterGroup.
* `ElecMeterID(1, 1, 'REDD')` - retrieves meter with specified meter ID
* `MeterGroupID(meters=(ElecMeterID(1, 1, 'REDD')))` - retrieves
          the existing nested MeterGroup whose meters exactly match the given ID.
* `[ElecMeterID(1, 1, 'REDD'), ElecMeterID(2, 1, 'REDD')]` - retrieves
existing nested MeterGroup containing exactly meter instances 1 and 2.
* `ElecMeterID(0, 1, 'REDD')` - instance `0` means `mains`. This returns
a new MeterGroup of all site_meters in building 1 in REDD.
* `ElecMeterID((1,2), 1, 'REDD')` - retrieve existing MeterGroup
which contains exactly meters 1 & 2.
* `(1, 2, 'REDD')` - converts to ElecMeterID and treats as an ElecMeterID.
Items must be in the order expected for an ElecMeterID.
Retrieve a meter using details of appliances attached to the meter:
* `'toaster'` - retrieves meter or group upstream of toaster instance 1
* `'toaster', 2` - retrieves meter or group upstream of toaster instance 2
* `{'dataset': 'redd', 'building': 3, 'type': 'toaster', 'instance': 2}`
- specify an appliance
Returns
-------
ElecMeter or MeterGroup
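        Examples
        --------
        Illustrative only; assumes ``elec`` is the MeterGroup for building 1
        of an already-loaded REDD-like dataset:
        >>> elec[1]                                        # doctest: +SKIP
        >>> elec[('fridge', 1)]                            # doctest: +SKIP
        >>> elec[ElecMeterID(1, 1, 'REDD')]                # doctest: +SKIP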
"""
if isinstance(key, str):
# default to get first meter
return self[(key, 1)]
elif isinstance(key, ElecMeterID):
if isinstance(key.instance, tuple):
# find meter group from a key of the form
# ElecMeterID(instance=(1,2), building=1, dataset='REDD')
for group in self.nested_metergroups():
if (set(group.instance()) == set(key.instance) and
group.building() == key.building and
group.dataset() == key.dataset):
return group
# Else try to find an ElecMeter with instance=(1,2)
for meter in self.meters:
if meter.identifier == key:
return meter
elif key.instance == 0:
metergroup_of_building = self.select(
building=key.building, dataset=key.dataset)
return metergroup_of_building.mains()
else:
for meter in self.meters:
if meter.identifier == key:
return meter
raise KeyError(key)
elif isinstance(key, MeterGroupID):
key_meters = set(key.meters)
for group in self.nested_metergroups():
if (set(group.identifier.meters) == key_meters):
return group
raise KeyError(key)
# find MeterGroup from list of ElecMeterIDs
elif isinstance(key, list):
if not all([isinstance(item, tuple) for item in key]):
raise TypeError("requires a list of ElecMeterID objects.")
for meter in self.meters: # TODO: write unit tests for this
# list of ElecMeterIDs. Return existing MeterGroup
if isinstance(meter, MeterGroup):
metergroup = meter
meter_ids = set(metergroup.identifier.meters)
if meter_ids == set(key):
return metergroup
raise KeyError(key)
elif isinstance(key, tuple):
if len(key) == 2:
if isinstance(key[0], str):
return self[{'type': key[0], 'instance': key[1]}]
else:
# Assume we're dealing with a request for 2 ElecMeters
return MeterGroup([self[i] for i in key])
elif len(key) == 3:
return self[ElecMeterID(*key)]
else:
raise TypeError()
elif isinstance(key, dict):
meters = []
for meter in self.meters:
if meter.matches_appliances(key):
meters.append(meter)
if len(meters) == 1:
return meters[0]
elif len(meters) > 1:
raise Exception('search terms match {} appliances'
.format(len(meters)))
else:
raise KeyError(key)
elif isinstance(key, int) and not isinstance(key, bool):
meters_found = []
for meter in self.meters:
if isinstance(meter.instance(), int):
if meter.instance() == key:
meters_found.append(meter)
elif isinstance(meter.instance(), (tuple, list)):
if key in meter.instance():
if isinstance(meter, MeterGroup):
print("Meter", key, "is in a nested meter group."
" Retrieving just the ElecMeter.")
meters_found.append(meter[key])
else:
meters_found.append(meter)
n_meters_found = len(meters_found)
if n_meters_found > 1:
raise Exception('{} meters found with instance == {}: {}'
.format(n_meters_found, key, meters_found))
elif n_meters_found == 0:
raise KeyError(
'No meters found with instance == {}'.format(key))
else:
return meters_found[0]
else:
raise TypeError()
def matches(self, key):
for meter in self.meters:
if meter.matches(key):
return True
return False
def select(self, **kwargs):
"""Select a group of meters based on meter metadata.
e.g.
* select(building=1, sample_period=6)
* select(room='bathroom')
If multiple criteria are supplied then these are ANDed together.
Returns
-------
new MeterGroup of selected meters.
Ideas for the future (not implemented yet!)
-------------------------------------------
* select(category=['ict', 'lighting'])
* select([(fridge, 1), (tv, 1)]) # get specifically fridge 1 and tv 1
* select(name=['fridge', 'tv']) # get all fridges and tvs
* select(category='lighting', except={'room'=['kitchen lights']})
* select('all', except=[('tv', 1)])
Also: see if we can do select(category='lighting' | name='tree lights')
or select(energy > 100)?? Perhaps using:
* Python's eval function something like this:
>>> s = pd.Series(np.random.randn(5))
>>> eval('(x > 0) | (index > 2)', {'x':s, 'index':s.index})
Hmm, yes, maybe we should just implement this! e.g.
select("(category == 'lighting') | (category == 'ict')")
But what about:
* select('total_energy > 100')
* select('mean(hours_on_per_day) > 3')
* select('max(hours_on_per_day) > 5')
* select('max(power) > 2000')
* select('energy_per_day > 2')
* select('rank_by_energy > 5') # top_k(5)
* select('rank_by_proportion > 0.2')
Maybe don't bother. That's easy enough
to get with itemised_energy(). Although these are quite nice
and shouldn't be too hard. Would need to only calculate
these stats if necessary though (e.g. by checking if 'total_energy'
is in the query string before running `eval`)
* or numexpr: https://github.com/pydata/numexpr
* see Pandas.eval():
* http://pandas.pydata.org/pandas-docs/stable/indexing.html#the-query-method-experimental
* https://github.com/pydata/pandas/blob/master/pandas/computation/eval.py#L119
"""
selected_meters = []
exception_raised_every_time = True
exception = None
func = kwargs.pop('func', 'matches')
for meter in self.meters:
try:
match = getattr(meter, func)(kwargs)
except KeyError as e:
exception = e
else:
exception_raised_every_time = False
if match:
selected_meters.append(meter)
if exception_raised_every_time and exception is not None:
raise exception
return MeterGroup(selected_meters)
def select_using_appliances(self, **kwargs):
"""Select a group of meters based on appliance metadata.
e.g.
* select(category='lighting')
* select(type='fridge')
* select(building=1, category='lighting')
* select(room='bathroom')
If multiple criteria are supplied then these are ANDed together.
Returns
-------
new MeterGroup of selected meters.
"""
return self.select(func='matches_appliances', **kwargs)
def from_list(self, meter_ids):
"""
Parameters
----------
meter_ids : list or tuple
Each element is an ElecMeterID or a MeterGroupID.
Returns
-------
MeterGroup
"""
meter_ids = list(meter_ids)
meter_ids = list(set(meter_ids)) # make unique
meters = []
def append_meter_group(meter_id):
try:
# see if there is an existing MeterGroup
metergroup = self[meter_id]
except KeyError:
# there is no existing MeterGroup so assemble one
metergroup = self.from_list(meter_id.meters)
meters.append(metergroup)
for meter_id in meter_ids:
if isinstance(meter_id, ElecMeterID):
meters.append(self[meter_id])
elif isinstance(meter_id, MeterGroupID):
append_meter_group(meter_id)
elif isinstance(meter_id, tuple):
meter_id = MeterGroupID(meters=meter_id)
append_meter_group(meter_id)
else:
raise TypeError()
return MeterGroup(meters)
@classmethod
def from_other_metergroup(cls, other, dataset):
"""Assemble a new meter group using the same meter IDs and nested
MeterGroups as `other`. This is useful for preparing a ground truth
metergroup from a meter group of NILM predictions.
Parameters
----------
other : MeterGroup
dataset : string
The `name` of the dataset for the ground truth. e.g. 'REDD'
Returns
-------
MeterGroup
"""
other_identifiers = other.identifier.meters
new_identifiers = []
for other_id in other_identifiers:
new_id = other_id._replace(dataset=dataset)
if isinstance(new_id.instance, tuple):
nested = []
for instance in new_id.instance:
new_nested_id = new_id._replace(instance=instance)
nested.append(new_nested_id)
new_identifiers.append(tuple(nested))
else:
new_identifiers.append(new_id)
metergroup = MeterGroup()
metergroup.from_list(new_identifiers)
return metergroup
def __eq__(self, other):
if isinstance(other, MeterGroup):
return set(other.meters) == set(self.meters)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def appliances(self):
appliances = set()
for meter in self.meters:
appliances.update(meter.appliances)
return list(appliances)
def dominant_appliances(self):
appliances = set()
for meter in self.meters:
appliances.add(meter.dominant_appliance())
return list(appliances)
def values_for_appliance_metadata_key(self, key,
only_consider_dominant_appliance=True):
"""
Parameters
----------
key : str
e.g. 'type' or 'categories' or 'room'
Returns
-------
list
"""
values = []
if only_consider_dominant_appliance:
appliances = self.dominant_appliances()
else:
appliances = self.appliances
for appliance in appliances:
value = appliance.metadata.get(key)
append_or_extend_list(values, value)
value = appliance.type.get(key)
append_or_extend_list(values, value)
return list(set(values))
def get_labels(self, meter_ids, pretty=True):
"""Create human-readable meter labels.
Parameters
----------
meter_ids : list of ElecMeterIDs (or 3-tuples in same order as ElecMeterID)
Returns
-------
list of strings describing the appliances.
"""
meters = [self[meter_id] for meter_id in meter_ids]
labels = [meter.label(pretty=pretty) for meter in meters]
return labels
def __repr__(self):
s = "{:s}(meters=\n".format(self.__class__.__name__)
for meter in self.meters:
s += " " + str(meter).replace("\n", "\n ") + "\n"
s += ")"
return s
@property
def identifier(self):
"""Returns a MeterGroupID."""
return MeterGroupID(meters=tuple([meter.identifier for meter in self.meters]))
def instance(self):
"""Returns tuple of integers where each int is a meter instance."""
return tuple([meter.instance() for meter in self.meters])
def building(self):
"""Returns building instance integer(s)."""
buildings = set([meter.building() for meter in self.meters])
return simplest_type_for(buildings)
def contains_meters_from_multiple_buildings(self):
"""Returns True if this MeterGroup contains meters from
more than one building."""
building = self.building()
try:
n = len(building)
except TypeError:
return False
else:
return n > 1
def dataset(self):
"""Returns dataset string(s)."""
datasets = set([meter.dataset() for meter in self.meters])
return simplest_type_for(datasets)
def sample_period(self):
"""Returns max of all meter sample periods."""
return max([meter.sample_period() for meter in self.meters])
def wiring_graph(self):
"""Returns a networkx.DiGraph of connections between meters."""
wiring_graph = nx.DiGraph()
def _build_wiring_graph(meters):
for meter in meters:
if isinstance(meter, MeterGroup):
metergroup = meter
_build_wiring_graph(metergroup.meters)
else:
upstream_meter = meter.upstream_meter(raise_warning=False)
# Need to ensure we use the same object
# if upstream meter already exists.
if upstream_meter is not None:
for node in wiring_graph.nodes():
if upstream_meter == node:
upstream_meter = node
break
wiring_graph.add_edge(upstream_meter, meter)
_build_wiring_graph(self.meters)
return wiring_graph
def draw_wiring_graph(self, show_meter_labels=True):
graph = self.wiring_graph()
meter_labels = {meter: meter.instance() for meter in graph.nodes()}
pos = nx.graphviz_layout(graph, prog='dot')
nx.draw(graph, pos, labels=meter_labels, arrows=False)
if show_meter_labels:
meter_labels = {meter: meter.label() for meter in graph.nodes()}
for meter, name in meter_labels.iteritems():
x, y = pos[meter]
if meter.is_site_meter():
delta_y = 5
else:
delta_y = -5
plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center')
ax = plt.gca()
return graph, ax
def load(self, **kwargs):
"""Returns a generator of DataFrames loaded from the DataStore.
By default, `load` will load all available columns from the DataStore.
Specific columns can be selected in one or two mutually exclusive ways:
1. specify a list of column names using the `cols` parameter.
2. specify a `physical_quantity` and/or an `ac_type` parameter to ask
`load` to automatically select columns.
Each meter in the MeterGroup will first be resampled before being added.
The returned DataFrame will include NaNs at timestamps where no meter
had a sample (after resampling the meter).
Parameters
----------
sample_period : int or float, optional
Number of seconds to use as sample period when reindexing meters.
If not specified then will use the max of all meters' sample_periods.
        resample_kwargs : dict of keyword arguments (other than 'rule') to
            pass to `pd.DataFrame.resample()`
chunksize : int, optional
the maximum number of rows per chunk. Note that each chunk is
guaranteed to be of length <= chunksize. Each chunk is *not*
guaranteed to be exactly of length == chunksize.
**kwargs :
any other key word arguments to pass to `self.store.load()` including:
physical_quantity : string or list of strings
e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy'].
If a single string then load columns only for that physical quantity.
If a list of strings then load columns for all those physical
quantities.
ac_type : string or list of strings, defaults to None
Where 'ac_type' is short for 'alternating current type'. e.g.
'reactive' or 'active' or 'apparent'.
If set to None then will load all AC types per physical quantity.
If set to 'best' then load the single best AC type per
physical quantity.
If set to a single AC type then load just that single AC type per
physical quantity, else raise an Exception.
If set to a list of AC type strings then will load all those
AC types and will raise an Exception if any cannot be found.
cols : list of tuples, using NILMTK's vocabulary for measurements.
e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')]
`cols` can't be used if `ac_type` and/or `physical_quantity` are set.
preprocessing : list of Node subclass instances
e.g. [Clip()]
Returns
---------
Always return a generator of DataFrames (even if it only has a single
column).
.. note:: Different AC types will be treated separately.
"""
# Handle kwargs
sample_period = kwargs.setdefault('sample_period', self.sample_period())
sections = kwargs.pop('sections', [self.get_timeframe()])
chunksize = kwargs.pop('chunksize', MAX_MEM_ALLOWANCE_IN_BYTES)
duration_threshold = sample_period * chunksize
columns = pd.MultiIndex.from_tuples(
self._convert_physical_quantity_and_ac_type_to_cols(**kwargs)['cols'],
names=LEVEL_NAMES)
freq = '{:d}S'.format(int(sample_period))
verbose = kwargs.get('verbose')
# Check for empty sections
sections = [section for section in sections if section]
if not sections:
print("No sections to load.")
yield pd.DataFrame(columns=columns)
return
# Loop through each section to load
for section in split_timeframes(sections, duration_threshold):
kwargs['sections'] = [section]
start = normalise_timestamp(section.start, freq)
tz = None if start.tz is None else start.tz.zone
index = pd.date_range(
start.tz_localize(None), section.end.tz_localize(None), tz=tz,
closed='left', freq=freq)
chunk = combine_chunks_from_generators(
index, columns, self.meters, kwargs)
yield chunk
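# --- Illustrative usage of `load` (editor's sketch, not part of the original
# NILMTK source). It assumes `elec` is a MeterGroup, e.g. obtained from a
# nilmtk DataSet; the HDF5 path and building number below are hypothetical. ---
#
#     from nilmtk import DataSet
#     elec = DataSet('/data/ukdale.h5').buildings[1].elec
#     for chunk in elec.load(physical_quantity='power', ac_type='best',
#                            sample_period=60):
#         print(chunk.head())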
def _convert_physical_quantity_and_ac_type_to_cols(self, **kwargs):
all_columns = set()
kwargs = deepcopy(kwargs)
for meter in self.meters:
kwargs_copy = deepcopy(kwargs)
new_kwargs = meter._convert_physical_quantity_and_ac_type_to_cols(**kwargs_copy)
cols = new_kwargs.get('cols', [])
for col in cols:
all_columns.add(col)
kwargs['cols'] = list(all_columns)
return kwargs
def _meter_generators(self, **kwargs):
"""Returns (list of identifiers, list of generators)."""
generators = []
identifiers = []
for meter in self.meters:
kwargs_copy = deepcopy(kwargs)
generator = meter.load(**kwargs_copy)
generators.append(generator)
identifiers.append(meter.identifier)
return identifiers, generators
def simultaneous_switches(self, threshold=40):
"""
Parameters
----------
threshold : number, threshold in Watts
Returns
-------
sim_switches : pd.Series of type {timestamp: number of
simultaneous switches}
Notes
-----
This function assumes that the submeters in this MeterGroup
are all aligned. If they are not then you should align the
meters, e.g. by using an `Apply` node with `resample`.
"""
submeters = self.submeters().meters
count = Counter()
for meter in submeters:
switch_time_meter = meter.switch_times(threshold)
for timestamp in switch_time_meter:
count[timestamp] += 1
sim_switches = pd.Series(count)
# Should be 2 or more appliances changing state at the same time
sim_switches = sim_switches[sim_switches >= 2]
return sim_switches
def mains(self):
"""
Returns
-------
ElecMeter or MeterGroup or None
"""
if self.contains_meters_from_multiple_buildings():
msg = ("This MeterGroup contains meters from buildings '{}'."
" It only makes sense to get `mains` if the MeterGroup"
" contains meters from a single building."
.format(self.building()))
raise RuntimeError(msg)
site_meters = [meter for meter in self.meters if meter.is_site_meter()]
n_site_meters = len(site_meters)
if n_site_meters == 0:
return
elif n_site_meters == 1:
return site_meters[0]
else:
return MeterGroup(meters=site_meters)
def use_alternative_mains(self):
"""Swap present mains meter(s) for mains meter(s) in `disabled_meters`.
This is useful if the dataset has multiple, redundant mains meters
(e.g. in UK-DALE buildings 1, 2 and 5).
"""
present_mains = [m for m in self.meters if m.is_site_meter()]
alternative_mains = [m for m in self.disabled_meters if m.is_site_meter()]
if not alternative_mains:
raise RuntimeError("No site meters found in `self.disabled_meters`")
for meter in present_mains:
self.meters.remove(meter)
self.disabled_meters.append(meter)
for meter in alternative_mains:
self.meters.append(meter)
self.disabled_meters.remove(meter)
def upstream_meter(self):
"""Returns single upstream meter.
Raises RuntimeError if more than 1 upstream meter.
"""
upstream_meters = []
for meter in self.meters:
upstream_meters.append(meter.upstream_meter())
unique_upstream_meters = list(set(upstream_meters))
if len(unique_upstream_meters) > 1:
raise RuntimeError("{:d} upstream meters found for meter group."
" Should be 1.".format(len(unique_upstream_meters)))
return unique_upstream_meters[0]
def meters_directly_downstream_of_mains(self):
"""Returns new MeterGroup."""
meters = nodes_adjacent_to_root(self.wiring_graph())
assert isinstance(meters, list)
return MeterGroup(meters)
def submeters(self):
"""Returns new MeterGroup of all meters except site_meters"""
submeters = [meter for meter in self.meters
if not meter.is_site_meter()]
return MeterGroup(submeters)
def is_site_meter(self):
"""Returns True if any meters are site meters"""
return any([meter.is_site_meter() for meter in self.meters])
def total_energy(self, **load_kwargs):
"""Sums together total meter_energy for each meter.
Note that this function does *not* return the total aggregate
energy for a building. Instead this function adds up the total energy
for all the meters contained in this MeterGroup. If you want the total
aggregate energy then please use `MeterGroup.mains().total_energy()`.
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return TotalEnergyResults object
else return a pd.Series with a row for each AC type.
"""
self._check_kwargs_for_full_results_and_sections(load_kwargs)
full_results = load_kwargs.pop('full_results', False)
meter_energies = self._collect_stats_on_all_meters(
load_kwargs, 'total_energy', full_results)
if meter_energies:
total_energy_results = meter_energies[0]
for meter_energy in meter_energies[1:]:
if full_results:
total_energy_results.unify(meter_energy)
else:
total_energy_results += meter_energy
return total_energy_results
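# --- Illustrative usage of `total_energy` (editor's sketch, not part of the
# original source; `elec` is an assumed MeterGroup as above). It contrasts
# summing every contained meter with asking the mains meter(s) only. ---
#
#     energy_of_all_meters = elec.total_energy()       # sums every meter
#     aggregate_energy = elec.mains().total_energy()   # whole-building aggregate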
def _collect_stats_on_all_meters(self, load_kwargs, func, full_results):
collected_stats = []
for meter in self.meters:
print_on_line("\rCalculating", func, "for", meter.identifier, "... ")
single_stat = getattr(meter, func)(full_results=full_results,
**load_kwargs)
collected_stats.append(single_stat)
if (full_results and len(self.meters) > 1 and
not meter.store.all_sections_smaller_than_chunksize):
warn("at least one section requested from '{}' required"
" multiple chunks to be loaded into memory. This may cause"
" a failure when we try to unify results from multiple"
" meters.".format(meter))
return collected_stats
def dropout_rate(self, **load_kwargs):
"""Sums together total energy for each meter.
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return the unified full-results object
else return the mean dropout rate across all meters (a single number).
"""
self._check_kwargs_for_full_results_and_sections(load_kwargs)
full_results = load_kwargs.pop('full_results', False)
dropout_rates = self._collect_stats_on_all_meters(
load_kwargs, 'dropout_rate', full_results)
if full_results and dropout_rates:
dropout_rate_results = dropout_rates[0]
for dr in dropout_rates[1:]:
dropout_rate_results.unify(dr)
return dropout_rate_results
else:
return np.mean(dropout_rates)
def _check_kwargs_for_full_results_and_sections(self, load_kwargs):
if (load_kwargs.get('full_results')
and 'sections' not in load_kwargs
and len(self.meters) > 1):
raise RuntimeError("MeterGroup stats can only return full results"
" objects if you specify 'sections' to load. If"
" you do not specify periods then the results"
" from individual meters are likely to be for"
" different periods and hence"
" cannot be unified.")
def good_sections(self, **kwargs):
"""Returns good sections for just the first meter.
TODO: combine good sections from every meter.
"""
if self.meters:
if len(self.meters) > 1:
warn("As a quick implementation we only get Good Sections from"
" the first meter in the meter group. We should really"
" return the intersection of the good sections for all"
" meters. This will be fixed...")
return self.meters[0].good_sections(**kwargs)
else:
return []
def dataframe_of_meters(self, **kwargs):
"""
Parameters
----------
sample_period : int or float, optional
Number of seconds to use as sample period when reindexing meters.
If not specified then will use the max of all meters' sample_periods.
resample : bool, defaults to True
If True then resample to `sample_period`.
**kwargs :
any other key word arguments to pass to `self.store.load()` including:
ac_type : string, defaults to 'best'
physical_quantity: string, defaults to 'power'
Returns
-------
DataFrame
Each column is a meter.
"""
kwargs.setdefault('sample_period', self.sample_period())
kwargs.setdefault('ac_type', 'best')
kwargs.setdefault('physical_quantity', 'power')
identifiers, generators = self._meter_generators(**kwargs)
segments = []
while True:
chunks = []
ids = []
for meter_id, generator in zip(identifiers, generators):
try:
chunk_from_next_meter = next(generator)
except StopIteration:
continue
if not chunk_from_next_meter.empty:
ids.append(meter_id)
chunks.append(chunk_from_next_meter.sum(axis=1))
if chunks:
df = pd.concat(chunks, axis=1)
df.columns = ids
segments.append(df)
else:
break
if segments:
return pd.concat(segments)
else:
return pd.DataFrame(columns=self.identifier.meters)
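# --- Illustrative usage of `dataframe_of_meters` (editor's sketch, not part of
# the original source; `elec` is an assumed MeterGroup). Each column of the
# returned DataFrame is one meter, resampled to a common sample period. ---
#
#     df = elec.submeters().dataframe_of_meters(sample_period=60)
#     df.sum().sort_values(ascending=False).head()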
def entropy_per_meter(self):
"""Finds the entropy of each meter in this MeterGroup.
Returns
-------
pd.Series of entropy
"""
return self.call_method_on_all_meters('entropy')
def call_method_on_all_meters(self, method):
"""Calls `method` on each element in `self.meters`.
Parameters
----------
method : str
Name of a stats method in `ElecMeter`. e.g. 'correlation'.
Returns
-------
pd.Series of result of `method` called on each element in `self.meters`.
"""
meter_identifiers = list(self.identifier.meters)
result = pd.Series(index=meter_identifiers)
for meter in self.meters:
id_meter = meter.identifier
result[id_meter] = getattr(meter, method)()
return result
def pairwise(self, method):
"""
Calls `method` on all pairs in `self.meters`.
Assumes `method` is symmetrical.
Parameters
----------
method : str
Name of a stats method in `ElecMeter`. e.g. 'correlation'.
Returns
-------
pd.DataFrame of the result of `method` called on each
pair in `self.meters`.
"""
meter_identifiers = list(self.identifier.meters)
result = pd.DataFrame(index=meter_identifiers, columns=meter_identifiers)
for i, m_i in enumerate(self.meters):
for j, m_j in enumerate(self.meters):
id_i = m_i.identifier
id_j = m_j.identifier
if i > j:
result[id_i][id_j] = result[id_j][id_i]
else:
result[id_i][id_j] = getattr(m_i, method)(m_j)
return result
def pairwise_mutual_information(self):
"""
Finds the pairwise mutual information among different
meters in a MeterGroup.
Returns
-------
pd.DataFrame of mutual information between
pair of ElecMeters.
"""
return self.pairwise('mutual_information')
def pairwise_correlation(self):
"""
Finds the pairwise correlation among different
meters in a MeterGroup.
Returns
-------
pd.DataFrame of correlation between pair of ElecMeters.
"""
return self.pairwise('correlation')
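# --- Illustrative usage of the pairwise stats (editor's sketch, not part of
# the original source; `elec` is an assumed MeterGroup). Both calls return a
# square DataFrame indexed by meter identifiers. ---
#
#     corr_matrix = elec.submeters().pairwise_correlation()
#     mi_matrix = elec.submeters().pairwise_mutual_information()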
def proportion_of_energy_submetered(self, **loader_kwargs):
"""
Returns
-------
float [0,1] or NaN if mains total_energy == 0
"""
print("Running MeterGroup.proportion_of_energy_submetered...")
mains = self.mains()
downstream_meters = self.meters_directly_downstream_of_mains()
proportion = 0.0
verbose = loader_kwargs.get('verbose')
all_nan = True
for m in downstream_meters.meters:
if verbose:
print("Calculating proportion for", m)
prop = m.proportion_of_energy(mains, **loader_kwargs)
if not np.isnan(prop):
proportion += prop
all_nan = False
if verbose:
print(" {:.2%}".format(prop))
if all_nan:
proportion = np.NaN
return proportion
def available_ac_types(self, physical_quantity):
"""Returns set of all available alternating current types for a
specific physical quantity.
Parameters
----------
physical_quantity : str or list of strings
Returns
-------
list of strings e.g. ['apparent', 'active']
"""
all_ac_types = [meter.available_ac_types(physical_quantity)
for meter in self.meters]
return list(set(flatten_2d_list(all_ac_types)))
def available_physical_quantities(self):
"""
Returns
-------
list of strings e.g. ['power', 'energy']
"""
all_physical_quants = [meter.available_physical_quantities()
for meter in self.meters]
return list(set(flatten_2d_list(all_physical_quants)))
def energy_per_meter(self, per_period=None, mains=None,
use_meter_labels=False, **load_kwargs):
"""Returns pd.DataFrame where columns is meter.identifier and
each value is total energy. Index is AC types.
Does not care about wiring hierarchy. Does not attempt to ensure all
channels share the same time sections.
Parameters
----------
per_period : None or offset alias
If None then returns absolute energy used per meter.
If a Pandas offset alias (e.g. 'D' for 'daily') then
will return the average energy per period.
ac_type : None or str
e.g. 'active' or 'best'. Defaults to 'best'.
use_meter_labels : bool
If True then columns will be human-friendly meter labels.
If False then columns will be ElecMeterIDs or MeterGroupIDs
mains : None or MeterGroup or ElecMeter
If None then will return DataFrame without remainder.
If not None then will return a Series including a 'remainder'
row which will be `mains.total_energy() - energy_per_meter.sum()`
and an attempt will be made to use the correct AC_TYPE.
Returns
-------
pd.DataFrame if mains is None else a pd.Series
"""
meter_identifiers = list(self.identifier.meters)
energy_per_meter = pd.DataFrame(columns=meter_identifiers, index=AC_TYPES)
n_meters = len(self.meters)
load_kwargs.setdefault('ac_type', 'best')
for i, meter in enumerate(self.meters):
print('\r{:d}/{:d} {}'.format(i+1, n_meters, meter), end='')
stdout.flush()
if per_period is None:
meter_energy = meter.total_energy(**load_kwargs)
else:
load_kwargs.setdefault('use_uptime', False)
meter_energy = meter.average_energy_per_period(
offset_alias=per_period, **load_kwargs)
energy_per_meter[meter.identifier] = meter_energy
energy_per_meter = energy_per_meter.dropna(how='all')
if use_meter_labels:
energy_per_meter.columns = self.get_labels(energy_per_meter.columns)
if mains is not None:
energy_per_meter = self._energy_per_meter_with_remainder(
energy_per_meter, mains, per_period, **load_kwargs)
return energy_per_meter
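# --- Illustrative usage of `energy_per_meter` (editor's sketch, not part of
# the original source; `elec` is an assumed MeterGroup). Passing `mains` adds
# a 'Remainder' row to the result. ---
#
#     energy = elec.submeters().energy_per_meter(
#         per_period='D', mains=elec.mains(), use_meter_labels=True)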
def _energy_per_meter_with_remainder(self, energy_per_meter,
mains, per_period, **kwargs):
ac_types = energy_per_meter.keys()
energy_per_meter = energy_per_meter.sum() # Collapse AC_TYPEs into Series
# Find most common ac_type in energy_per_meter:
most_common_ac_type = most_common(ac_types)
mains_ac_types = mains.available_ac_types(
['power', 'energy', 'cumulative energy'])
if most_common_ac_type in mains_ac_types:
mains_ac_type = most_common_ac_type
else:
mains_ac_type = 'best'
# Get mains energy_per_meter
kwargs['ac_type'] = mains_ac_type
if per_period is None:
mains_energy = mains.total_energy(**kwargs)
else:
mains_energy = mains.average_energy_per_period(
offset_alias=per_period, **kwargs)
mains_energy = mains_energy[mains_energy.keys()[0]]
# Calculate remainder
energy_per_meter['Remainder'] = mains_energy - energy_per_meter.sum()
energy_per_meter.sort_values(ascending=False, inplace=True)
return energy_per_meter
def fraction_per_meter(self, **load_kwargs):
"""Fraction of energy per meter.
Return pd.Series. Index is meter.instance.
Each value is a float in the range [0,1].
"""
energy_per_meter = self.energy_per_meter(**load_kwargs).max()
total_energy = energy_per_meter.sum()
return energy_per_meter / total_energy
def proportion_of_upstream_total_per_meter(self, **load_kwargs):
prop_per_meter = pd.Series(index=self.identifier.meters)
n_meters = len(self.meters)
for i, meter in enumerate(self.meters):
proportion = meter.proportion_of_upstream(**load_kwargs)
print('\r{:d}/{:d} {} = {:.3f}'
.format(i+1, n_meters, meter, proportion), end='')
stdout.flush()
prop_per_meter[meter.identifier] = proportion
prop_per_meter.sort_values(ascending=False, inplace=True)
return prop_per_meter
def train_test_split(self, train_fraction=0.5):
"""
Parameters
----------
train_fraction
Returns
-------
split_time: pd.Timestamp where split should happen
"""
assert(
0 < train_fraction < 1), "`train_fraction` should be between 0 and 1"
# TODO: currently just works with the first mains meter, assuming
# both to be simultaneously sampled
mains = self.mains()
good_sections = self.mains().good_sections()
sample_period = mains.device['sample_period']
appx_num_records_in_each_good_section = [
int((ts.end - ts.start).total_seconds() / sample_period) for ts in good_sections]
appx_total_records = sum(appx_num_records_in_each_good_section)
records_in_train = appx_total_records * train_fraction
seconds_in_train = int(records_in_train * sample_period)
if len(good_sections) == 1:
# all data is contained in one good section
split_point = good_sections[
0].start + timedelta(seconds=seconds_in_train)
return split_point
else:
# data is split across multiple time deltas
records_remaining = records_in_train
while records_remaining:
for i, records_in_section in enumerate(appx_num_records_in_each_good_section):
if records_remaining > records_in_section:
records_remaining -= records_in_section
elif records_remaining == records_in_section:
# Next TimeFrame is the split point!!
split_point = good_sections[i + 1].start
return split_point
else:
# Need to split this timeframe
split_point = good_sections[
i].start + timedelta(seconds=sample_period * records_remaining)
return split_point
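# --- Illustrative usage of `train_test_split` (editor's sketch, not part of
# the original source; `elec` is an assumed MeterGroup). The returned timestamp
# marks where the data should be cut into train and test periods. ---
#
#     split_point = elec.train_test_split(train_fraction=0.8)
#     print("train on data before", split_point)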
################## FUNCTIONS NOT YET IMPLEMENTED ###################
# def init_new_dataset(self):
# self.infer_and_set_meter_connections()
# self.infer_and_set_dual_supply_appliances()
# def infer_and_set_meter_connections(self):
# """
# Arguments
# ---------
# meters : list of Meter objects
# """
# Maybe this should be a stand-alone function which
# takes a list of meters???
# raise NotImplementedError
# def infer_and_set_dual_supply_appliances(self):
# raise NotImplementedError
# def total_on_duration(self):
# """Return timedelta"""
# raise NotImplementedError
# def on_durations(self):
# self.get_unique_upstream_meters()
# for each meter, get the on time,
# assuming the on-power-threshold for the
# smallest appliance connected to that meter???
# raise NotImplementedError
# def activity_distribution(self, bin_size, timespan):
# raise NotImplementedError
# def on_off_events(self, minimum_state_duration):
# raise NotImplementedError
def select_top_k(self, k=5, by="energy", asc=False, group_remainder=False, **kwargs):
"""Only select the top K meters, according to energy.
Functions on the entire MeterGroup. So if you mean to select
the top K from only the submeters, please do something like
this:
elec.submeters().select_top_k()
Parameters
----------
k : int, optional, defaults to 5
by: string, optional, defaults to energy
Can select top k by:
* energy
* entropy
asc: bool, optional, defaults to False
By default top_k is in descending order. To select top_k
by ascending order, use asc=True
group_remainder : bool, optional, defaults to False
If True then place all remaining meters into a
nested metergroup.
**kwargs : key word arguments to pass to load()
Returns
-------
MeterGroup
"""
function_map = {'energy': self.fraction_per_meter, 'entropy': self.entropy_per_meter}
top_k_series = function_map[by](**kwargs)
top_k_series.sort_values(ascending=asc, inplace=True)
top_k_elec_meter_ids = top_k_series[:k].index
top_k_metergroup = self.from_list(top_k_elec_meter_ids)
if group_remainder:
remainder_ids = top_k_series[k:].index
remainder_metergroup = self.from_list(remainder_ids)
remainder_metergroup.name = 'others'
top_k_metergroup.meters.append(remainder_metergroup)
return top_k_metergroup
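# --- Illustrative usage of `select_top_k` (editor's sketch, not part of the
# original source; `elec` is an assumed MeterGroup). As the docstring notes,
# call it on `submeters()` to exclude site meters. ---
#
#     top5 = elec.submeters().select_top_k(k=5, by='energy')
#     top5_plus_rest = elec.submeters().select_top_k(k=5, group_remainder=True)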
def groupby(self, key, use_appliance_metadata=True, **kwargs):
"""
e.g. groupby('category')
Returns
-------
MeterGroup of nested MeterGroups: one per group
"""
if not use_appliance_metadata:
raise NotImplementedError()
values = self.values_for_appliance_metadata_key(key)
groups = []
for value in values:
group = self.select_using_appliances(**{key: value})
group.name = value
groups.append(group)
return MeterGroup(groups)
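# --- Illustrative usage of `groupby` (editor's sketch, not part of the
# original source; `elec` is an assumed MeterGroup and 'category' is the
# appliance-metadata key mentioned in the docstring). It returns one nested
# MeterGroup per distinct value of the key. ---
#
#     by_category = elec.submeters().groupby('category')
#     for group in by_category.meters:
#         print(group.name, group.total_energy())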
def get_timeframe(self):
"""
Returns
-------
nilmtk.TimeFrame representing the timeframe which is the union
of all meters in self.meters.
"""
timeframe = None
for meter in self.meters:
if timeframe is None:
timeframe = meter.get_timeframe()
elif meter.get_timeframe().empty:
pass
else:
timeframe = timeframe.union(meter.get_timeframe())
return timeframe
def plot(self, kind='separate lines', **kwargs):
"""
Parameters
----------
width : int, optional
Number of points on the x axis required
ax : matplotlib.axes, optional
plot_legend : boolean, optional
Defaults to True. Set to False to not plot legend.
kind : {'separate lines', 'sum', 'area', 'snakey', 'energy bar'}
timeframe : nilmtk.TimeFrame, optional
Defaults to self.get_timeframe()
"""
# Load data and plot each meter
function_map = {
'separate lines': self._plot_separate_lines,
'sum': super(MeterGroup, self).plot,
'area': self._plot_area,
'sankey': self._plot_sankey,
'energy bar': self._plot_energy_bar
}
try:
ax = function_map[kind](**kwargs)
except KeyError:
raise ValueError("'{}' not a valid setting for 'kind' parameter."
.format(kind))
return ax
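# --- Illustrative usage of `plot` (editor's sketch, not part of the original
# source; `elec` is an assumed MeterGroup). `kind` selects one of the helpers
# mapped in `function_map` above. ---
#
#     ax = elec.submeters().plot(kind='area')
#     ax = elec.plot(kind='sum')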
def _plot_separate_lines(self, ax=None, plot_legend=True, **kwargs):
for meter in self.meters:
if isinstance(meter, MeterGroup):
ax = meter.plot(ax=ax, plot_legend=False, kind='sum', **kwargs)
else:
ax = meter.plot(ax=ax, plot_legend=False, **kwargs)
if plot_legend:
plt.legend()
return ax
def _plot_sankey(self):
graph = self.wiring_graph()
meter_labels = {meter: meter.instance() for meter in graph.nodes()}
pos = nx.graphviz_layout(graph, prog='dot')
#nx.draw(graph, pos, labels=meter_labels, arrows=False)
meter_labels = {meter: meter.label() for meter in graph.nodes()}
for meter, name in meter_labels.items():
x, y = pos[meter]
if meter.is_site_meter():
delta_y = 5
else:
delta_y = -5
plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center')
if not meter.is_site_meter():
upstream_meter = meter.upstream_meter()
proportion_of_upstream = meter.proportion_of_upstream()
print(meter.instance(), upstream_meter.instance(), proportion_of_upstream)
graph[upstream_meter][meter]["weight"] = proportion_of_upstream*10
graph[upstream_meter][meter]["color"] = "blue"
nx.draw(graph, pos, labels=meter_labels, arrows=False)
def _plot_area(self, ax=None, timeframe=None, pretty_labels=True, unit='W',
label_kwargs=None, plot_kwargs=None, threshold=None,
**load_kwargs):
"""
Parameters
----------
plot_kwargs : dict of key word arguments for DataFrame.plot()
unit : {kW or W}
threshold : float or None
if set to a float then any measured value under this threshold
will be set to 0.
Returns
-------
ax, dataframe
"""
# Get start and end times for the plot
timeframe = self.get_timeframe() if timeframe is None else timeframe
if not timeframe:
return ax
load_kwargs['sections'] = [timeframe]
load_kwargs = self._set_sample_period(timeframe, **load_kwargs)
df = self.dataframe_of_meters(**load_kwargs)
if threshold is not None:
df[df <= threshold] = 0
if unit == 'kW':
df /= 1000
if plot_kwargs is None:
plot_kwargs = {}
df.columns = self.get_labels(df.columns, pretty=pretty_labels)
# Set a tiny linewidth otherwise we get lines even if power is zero
# and this looks ugly when drawn above other lines.
plot_kwargs.setdefault('linewidth', 0.0001)
ax = df.plot(kind='area', **plot_kwargs)
ax.set_ylabel("Power ({:s})".format(unit))
return ax, df
def plot_when_on(self, **load_kwargs):
meter_identifiers = list(self.identifier.meters)
fig, ax = plt.subplots()
for i, meter in enumerate(self.meters):
id_meter = meter.identifier
for chunk_when_on in meter.when_on(**load_kwargs):
series_to_plot = chunk_when_on[chunk_when_on==True]
if len(series_to_plot.index):
(series_to_plot+i-1).plot(ax=ax, style='k.')
labels = self.get_labels(meter_identifiers)
plt.yticks(range(len(self.meters)), labels)
plt.ylim((-0.5, len(self.meters)+0.5))
return ax
def plot_good_sections(self, ax=None, label_func='instance',
include_disabled_meters=True, load_kwargs=None,
**plot_kwargs):
"""
Parameters
----------
label_func : str or None
e.g. 'instance' (default) or 'label'
if None then no labels will be produced.
include_disabled_meters : bool
"""
if ax is None:
ax = plt.gca()
if load_kwargs is None:
load_kwargs = {}
# Prepare list of meters
if include_disabled_meters:
meters = self.all_meters()
else:
meters = self.meters
meters = copy(meters)
meters.sort(key=meter_sorting_key, reverse=True)
n = len(meters)
labels = []
for i, meter in enumerate(meters):
good_sections = meter.good_sections(**load_kwargs)
ax = good_sections.plot(ax=ax, y=i, **plot_kwargs)
del good_sections
if label_func:
labels.append(getattr(meter, label_func)())
# Just end numbers
if label_func is None:
labels = [n] + ([''] * (n-1))
# Y tick formatting
ax.set_yticks(np.arange(0, n) + 0.5)
def y_formatter(y, pos):
try:
label = labels[int(y)]
except IndexError:
label = ''
return label
ax.yaxis.set_major_formatter(FuncFormatter(y_formatter))
ax.set_ylim([0, n])
return ax
def _plot_energy_bar(self, ax=None, mains=None):
"""Plot a stacked bar of the energy per meter, in order.
Parameters
----------
ax : matplotlib axes
mains : MeterGroup or ElecMeter, optional
Used to calculate Remainder.
Returns
-------
ax
"""
energy = self.energy_per_meter(mains=mains, per_period='D',
use_meter_labels=True)
energy.sort_values(ascending=False, inplace=True)
# Plot
ax = pd.DataFrame(energy).T.plot(kind='bar', stacked=True, grid=True,
edgecolor="none", legend=False, width=2)
ax.set_xticks([])
ax.set_ylabel('kWh\nper\nday', rotation=0, ha='center', va='center',
labelpad=15)
cumsum = energy.cumsum()
text_ys = cumsum - (cumsum.diff().fillna(energy['Remainder']) / 2)
for kwh, (label, y) in zip(energy.values, text_ys.items()):
label += " ({:.2f})".format(kwh)
ax.annotate(label, (0, y), color='white', size=8,
horizontalalignment='center',
verticalalignment='center')
return ax
def plot_multiple(self, axes, meter_keys, plot_func,
kwargs_per_meter=None, pretty_label=True, **kwargs):
"""Create multiple subplots.
Parameters
-----------
axes : list of matplotlib axes objects.
e.g. created using `fix, axes = plt.subplots()`
meter_keys : list of keys for identifying ElecMeters or MeterGroups.
e.g. ['fridge', 'kettle', 4, MeterGroupID, ElecMeterID].
Each element is anything that MeterGroup.__getitem__() accepts.
plot_func : string
Name of function from ElecMeter or Electric or MeterGroup
e.g. `plot_power_histogram`
kwargs_per_meter : dict
Provide key word arguments for the plot_func for each meter.
each key is a parameter name for plot_func
each value is a list (same length as `meters`) for specifying a value for
this parameter for each meter.
e.g. {'range': [(0,100), (0,200)]}
pretty_label : bool
**kwargs : any key word arguments to pass the same values to the
plot func for every meter.
Returns
-------
axes (flattened into a 1D list)
"""
axes = flatten_2d_list(axes)
if len(axes) != len(meter_keys):
raise ValueError("`axes` and `meters` must be of equal length.")
if kwargs_per_meter is None:
kwargs_per_meter = {}
meters = [self[meter_key] for meter_key in meter_keys]
for i, (ax, meter) in enumerate(zip(axes, meters)):
kwargs_copy = deepcopy(kwargs)
for parameter, arguments in kwargs_per_meter.items():
kwargs_copy[parameter] = arguments[i]
getattr(meter, plot_func)(ax=ax, **kwargs_copy)
ax.set_title(meter.label(pretty=pretty_label))
return axes
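# --- Illustrative usage of `plot_multiple` (editor's sketch, not part of the
# original source; `elec` is an assumed MeterGroup and the meter keys below
# come from the docstring example). ---
#
#     fig, axes = plt.subplots(1, 2)
#     elec.plot_multiple(axes, ['fridge', 'kettle'], 'plot_power_histogram')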
def sort_meters(self):
"""Sorts meters by instance."""
self.meters.sort(key=meter_sorting_key)
def label(self, **kwargs):
"""
Returns
-------
string : A label listing all the appliance types.
"""
if self.name:
label = self.name
if kwargs.get('pretty'):
label = capitalise_first_letter(label)
return label
return ", ".join(set([meter.label(**kwargs) for meter in self.meters]))
def clear_cache(self):
"""Clear cache on all meters in this MeterGroup."""
for meter in self.meters:
meter.clear_cache()
def correlation_of_sum_of_submeters_with_mains(self, **load_kwargs):
print("Running MeterGroup.correlation_of_sum_of_submeters_with_mains...")
submeters = self.meters_directly_downstream_of_mains()
return self.mains().correlation(submeters, **load_kwargs)
def all_meters(self):
"""Returns a list of self.meters + self.disabled_meters."""
return self.meters + self.disabled_meters
def describe(self, compute_expensive_stats=True, **kwargs):
"""Returns pd.Series describing this MeterGroup."""
series = pd.Series()
all_meters = self.all_meters()
series['total_n_meters'] = len(all_meters)
site_meters = [m for m in all_meters if m.is_site_meter()]
series['total_n_site_meters'] = len(site_meters)
if compute_expensive_stats:
series['correlation_of_sum_of_submeters_with_mains'] = (
self.correlation_of_sum_of_submeters_with_mains(**kwargs))
series['proportion_of_energy_submetered'] = (
self.proportion_of_energy_submetered(**kwargs))
dropout_rates = self._collect_stats_on_all_meters(
kwargs, 'dropout_rate', False)
dropout_rates = np.array(dropout_rates)
series['dropout_rates_ignoring_gaps'] = (
"min={}, mean={}, max={}".format(
dropout_rates.min(),
dropout_rates.mean(),
dropout_rates.max()))
series['mains_sample_period'] = self.mains().sample_period()
series['submeter_sample_period'] = self.submeters().sample_period()
timeframe = self.get_timeframe()
series['timeframe'] = "start={}, end={}".format(timeframe.start, timeframe.end)
series['total_duration'] = str(timeframe.timedelta)
mains_uptime = self.mains().uptime(**kwargs)
series['mains_uptime'] = str(mains_uptime)
try:
series['proportion_uptime'] = (mains_uptime.total_seconds() /
timeframe.timedelta.total_seconds())
except ZeroDivisionError:
series['proportion_uptime'] = np.NaN
series['average_mains_energy_per_day'] = self.mains().average_energy_per_period()
return series
def replace_dataset(identifier, dataset):
"""
Parameters
----------
identifier : ElecMeterID or MeterGroupID
Returns
-------
ElecMeterID or MeterGroupID with dataset replaced with `dataset`
"""
if isinstance(identifier, MeterGroupID):
new_meter_ids = [replace_dataset(id, dataset) for id in identifier.meters]
new_id = MeterGroupID(meters=tuple(new_meter_ids))
elif isinstance(identifier, ElecMeterID):
new_id = identifier._replace(dataset=dataset)
else:
raise TypeError()
return new_id
def iterate_through_submeters_of_two_metergroups(master, slave):
"""
Parameters
----------
master, slave : MeterGroup
Returns
-------
list of 2-tuples of the form (`master_meter`, `slave_meter`)
"""
zipped = []
for master_meter in master.submeters().meters:
slave_identifier = replace_dataset(master_meter.identifier, slave.dataset())
slave_meter = slave[slave_identifier]
zipped.append((master_meter, slave_meter))
return zipped
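# --- Illustrative usage of `iterate_through_submeters_of_two_metergroups`
# (editor's sketch, not part of the original source; `elec` and `elec_other`
# are assumed MeterGroups from two datasets describing the same building). ---
#
#     for master_meter, slave_meter in \
#             iterate_through_submeters_of_two_metergroups(elec, elec_other):
#         print(master_meter.identifier, slave_meter.identifier)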
def combine_chunks_from_generators(index, columns, meters, kwargs):
"""Combines chunks into a single DataFrame.
Adds or averages columns, depending on whether each column is in
PHYSICAL_QUANTITIES_TO_AVERAGE.
Returns
-------
DataFrame
"""
# Regarding columns (e.g. voltage) that we need to average:
# The approach is that we first add everything together
# in the first for-loop, whilst also keeping a
# `columns_to_average_counter` DataFrame
# which tells us what to divide by in order to compute the
# mean for PHYSICAL_QUANTITIES_TO_AVERAGE.
# Regarding doing an in-place addition:
# We convert our cumulator dataframe to a numpy array.
# This allows us to use np.add to do an in-place add.
# If we didn't do this then we'd get horrible memory fragmentation.
# See http://stackoverflow.com/a/27526721/732596
DTYPE = np.float32
cumulator = pd.DataFrame(np.NaN, index=index, columns=columns, dtype=DTYPE)
cumulator_arr = cumulator.values
columns_to_average_counter = pd.DataFrame(dtype=np.uint16)
timeframe = None
# Go through each generator to try sum values together
for meter in meters:
print_on_line("\rLoading data for meter", meter.identifier, " ")
kwargs_copy = deepcopy(kwargs)
generator = meter.load(**kwargs_copy)
try:
chunk_from_next_meter = next(generator)
except StopIteration:
continue
del generator
del kwargs_copy
gc.collect()
if chunk_from_next_meter.empty or not chunk_from_next_meter.timeframe:
continue
if timeframe is None:
timeframe = chunk_from_next_meter.timeframe
else:
timeframe = timeframe.union(chunk_from_next_meter.timeframe)
# Add (in-place)
for i, column_name in enumerate(columns):
try:
column = chunk_from_next_meter[column_name]
except KeyError:
continue
aligned = column.reindex(index, copy=False).values
del column
cumulator_col = cumulator_arr[:,i]
where_both_are_nan = np.isnan(cumulator_col) & np.isnan(aligned)
np.nansum([cumulator_col, aligned], axis=0, out=cumulator_col,
dtype=DTYPE)
cumulator_col[where_both_are_nan] = np.NaN
del aligned
del where_both_are_nan
gc.collect()
# Update columns_to_average_counter - this is necessary so we do not
# add up columns like 'voltage' which should be averaged.
physical_quantities = chunk_from_next_meter.columns.get_level_values('physical_quantity')
columns_to_average = (set(PHYSICAL_QUANTITIES_TO_AVERAGE)
.intersection(physical_quantities))
if columns_to_average:
counter_increment = pd.DataFrame(1, columns=columns_to_average,
dtype=np.uint16,
index=chunk_from_next_meter.index)
columns_to_average_counter = columns_to_average_counter.add(
counter_increment, fill_value=0)
del counter_increment
del chunk_from_next_meter
gc.collect()
del cumulator_arr
gc.collect()
# Create mean values by dividing any columns which need dividing
for column in columns_to_average_counter:
cumulator[column] /= columns_to_average_counter[column]
del columns_to_average_counter
gc.collect()
print()
print("Done loading data all meters for this chunk.")
cumulator.timeframe = timeframe
return cumulator
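# --- Editor's sketch of the in-place accumulation pattern used above (not part
# of the original source): `np.nansum` writes into the pre-allocated column, so
# no new full-size array is created per meter. ---
#
#     import numpy as np
#     acc = np.full(5, np.nan, dtype=np.float32)
#     new = np.array([1, np.nan, 3, np.nan, 5], dtype=np.float32)
#     both_nan = np.isnan(acc) & np.isnan(new)
#     np.nansum([acc, new], axis=0, out=acc, dtype=np.float32)
#     acc[both_nan] = np.nan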
meter_sorting_key = lambda meter: meter.instance()
| apache-2.0 |
jlegendary/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW dataset requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
joernhees/scikit-learn | examples/cluster/plot_cluster_comparison.py | 46 | 6620 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
has been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.
The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch in the parameter values and the
data structure.
While these examples give some intuition about the
algorithms, this intuition might not apply to very high
dimensional data.
"""
print(__doc__)
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2}),
(aniso, {'eps': .15, 'n_neighbors': 2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
pkruskal/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
nickos556/pandas-qt | tests/test_BigIntSpinbox.py | 4 | 3217 | # -*- coding: utf-8 -*-
from pandasqt.compat import Qt, QtCore, QtGui
import pytest
from pandasqt.views.BigIntSpinbox import BigIntSpinbox
class TestClass(object):
@pytest.fixture
def spinbox(self, qtbot):
widget = BigIntSpinbox()
qtbot.addWidget(widget)
return widget
def test_init(self, spinbox):
assert spinbox
def test_value(self, spinbox):
assert spinbox.value() == 0
spinbox._lineEdit.setText('') # runs into exception
assert spinbox.value() == 0
def test_minimumMaximum(self, spinbox):
assert spinbox.minimum() == -18446744073709551616
assert spinbox.maximum() == 18446744073709551615
def test_setMinimumMaximum(self, spinbox):
spinbox.setMinimum(0)
spinbox.setMinimum(long(0))
spinbox.setMinimum(1)
spinbox.setMinimum(long(1))
spinbox.setMinimum(-1)
spinbox.setMinimum(long(-1))
with pytest.raises(TypeError) as excinfo:
spinbox.setMinimum('')
assert "int or long" in str(excinfo.value)
spinbox.setMaximum(0)
spinbox.setMaximum(long(0))
spinbox.setMaximum(1)
spinbox.setMaximum(long(1))
spinbox.setMaximum(-1)
spinbox.setMaximum(long(-1))
with pytest.raises(TypeError) as excinfo:
spinbox.setMaximum('')
assert "int or long" in str(excinfo.value)
def test_setValue(self, spinbox):
assert spinbox.setValue(10)
assert spinbox.value() == 10
assert spinbox.setValue(18446744073709551615 + 1)
assert spinbox.value() == spinbox.maximum()
assert spinbox.setValue(-18446744073709551616 - 1)
assert spinbox.value() == spinbox.minimum()
def test_singleStep(self, spinbox):
assert spinbox.singleStep() == 1
assert spinbox.setSingleStep(10) == 10
assert spinbox.setSingleStep(-10) == 10
with pytest.raises(TypeError) as excinfo:
spinbox.setSingleStep('')
spinbox.setSingleStep(0.1212)
assert "int" in str(excinfo.value)
assert spinbox.setSingleStep(0) == 0
def test_stepEnabled(self, spinbox):
assert spinbox.StepUpEnabled
assert spinbox.StepDownEnabled
assert spinbox.stepEnabled() == spinbox.StepUpEnabled | spinbox.StepDownEnabled
spinbox.setMinimum(0)
spinbox.setMaximum(10)
spinbox._lineEdit.setText(str(-1))
assert spinbox.stepEnabled() == spinbox.StepUpEnabled
spinbox._lineEdit.setText(str(11))
assert spinbox.stepEnabled() == spinbox.StepDownEnabled
def test_stepBy(self, spinbox):
spinbox.setMinimum(0)
spinbox.setMaximum(10)
spinbox.setValue(0)
spinbox.stepBy(1)
assert spinbox.value() == 1
spinbox.stepBy(-1)
assert spinbox.value() == 0
spinbox.setMinimum(0)
spinbox.setMaximum(10)
spinbox.setValue(0)
spinbox.stepBy(-1)
assert spinbox.value() == 0 # should be minimum cause -1 is out of bounds
spinbox.setValue(10)
spinbox.stepBy(1)
assert spinbox.value() == 10 # should be maximum cause 11 is out of bounds | mit |
bayesimpact/bob-emploi | data_analysis/lib/cleaned_data.py | 1 | 30930 | """Module to access to datasets already cleaned up.
You should not use this for notebooks that are trying to understand a dataset;
on the contrary, once you've done such a notebook you should add some code
here so that all other code can just access it. This code should be very much
tied to the data below it and shouldn't try to cope with errors in the data if
we have checked there are none (e.g. don't remove duplicates if we've checked
that lines are unique).
The format of all the function in this module should always be the same: a
function representing a table, taking as an optional argument the base data
folder and another optional argument (overriding the first one) the exact file
to import from. Each function should return a pandas DataFrame with cleaned
values and column names. The documentation of the function should serve as
the documentation of the returned DataFrame. See rome_job_groups as a good
example.
"""
import codecs
import collections
from os import path
import re
from typing import Any, Dict, List, Optional, Set, Union
import pandas
from scrapy import selector
_ROME_VERSION = 'v346'
# Denominator to compute the market score, because the yearly average number
# of offers is given per 10 candidates.
_YEARLY_AVG_OFFERS_DENOMINATOR = 10
# TODO: Use this function in city suggest importer to read the stats file.
def french_city_stats(data_folder: str = 'data', filename_city_stats: Optional[str] = None) \
-> pandas.DataFrame:
"""Read the french city stats."""
if not filename_city_stats:
filename_city_stats = path.join(data_folder, 'geo/french_cities.csv')
return pandas.read_csv(
filename_city_stats,
sep=',', header=None, usecols=[1, 8, 10, 14, 19, 20],
names=['departement_id', 'zipCode', 'city_id', 'population', 'longitude', 'latitude'],
dtype={
'departement_id': str,
'zipCode': str,
'city_id': str,
'population': int,
'latitude': float,
'longitude': float,
}).set_index('city_id', drop=False)
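# --- Illustrative usage (editor's sketch, not part of the original module).
# Every function below follows the convention described in the module
# docstring: an optional data folder and an optional filename override. The
# custom path in the second call is hypothetical. ---
#
#     cities = french_city_stats('data')
#     cities_from_custom_file = french_city_stats(
#         filename_city_stats='path/to/french_cities.csv')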
def job_offers(
data_folder: str = 'data', filename_offers: Optional[str] = None,
filename_colnames: Optional[str] = None) -> pandas.DataFrame:
"""Read the job offers dataset we got from Pole Emploi.
More info about the structure of this dataset can be found
in the notebook job_offers/pe_historical_offers.ipynb
"""
default_offers_path = 'job_offers/OFFRE_EXTRACT_ENRICHIE_FGU_18JANV2016.csv'
filename_offers = path.join(
data_folder, filename_offers or default_offers_path)
filename_colnames = path.join(
data_folder, filename_colnames or 'job_offers/column_names.txt')
with open(filename_colnames) as lines:
column_names = [line.strip() for line in lines.readlines()]
dtypes = {}
dtypes[column_names.index('city_code')] = str
dtypes[column_names.index('departement_code')] = str
dtypes[column_names.index('region_code')] = str
offers = pandas.read_csv(
filename_offers,
encoding='latin-1',
delimiter='|', # The file is *pipe separated*, not *comma separated*
escapechar='\\', # It also contains escaped *pipe separated* strings.
header=None,
names=column_names,
dtype=dtypes)
# Convert to datetime
offers['creation_date'] =\
pandas.to_datetime(offers['creation_date'])
offers['date_debut_validite_offre'] =\
pandas.to_datetime(offers['date_debut_validite_offre'])
offers['rome_id'] = offers.rome_profession_card_code
# Remove undesirable spaces
offers['rome_name'] = offers['rome_profession_card_name'].str.strip()
offers['annual_maximum_salary'] = pandas.to_numeric(
offers.annual_maximum_salary, errors='coerce')
offers['annual_minimum_salary'] = pandas.to_numeric(
offers.annual_minimum_salary, errors='coerce')
# We use `date_debut_validite_offre` when available and impute `creation_date`
# when missing.
offers['date_debut_imputed'] = offers['date_debut_validite_offre']
offers.loc[offers.date_debut_imputed.isnull(),
'date_debut_imputed'] = offers['creation_date']
# `experience_min_duration` is sometimes expressed in months or in years:
# let's convert everything into months.
annual_expe_condition = (offers.exp_duration_type_code == 'AN')
offers.loc[annual_expe_condition, 'experience_min_duration'] =\
offers.loc[annual_expe_condition, 'experience_min_duration'] * 12
offers.loc[annual_expe_condition, 'exp_duration_type_code'] = 'MO'
return offers
def rome_to_skills(
data_folder: str = 'data', filename_items: Optional[str] = None,
filename_skills: Optional[str] = None) -> pandas.DataFrame:
"""Load a dictionary that maps rome ID to a list of skill IDs.
The 'coherence' table contains a general mapping from rome ID to items
associated with this rome ID. Joining with the skills table will leave only
the skill related associations.
"""
if not filename_items:
filename_items = path.join(
data_folder, f'rome/csv/unix_coherence_item_{_ROME_VERSION}_utf8.csv')
if not filename_skills:
filename_skills = path.join(
data_folder, f'rome/csv/unix_referentiel_competence_{_ROME_VERSION}_utf8.csv')
rome_to_item = pandas.read_csv(filename_items, dtype=str)
skills = pandas.read_csv(filename_skills, dtype=str)
merged = pandas.merge(rome_to_item, skills, on='code_ogr')
merged['skill_name'] = merged.libelle_competence.str.replace("''", "'", regex=False)\
.apply(maybe_add_accents)
merged['skill_is_practical'] = merged.code_type_competence == '2'
return merged[['code_rome', 'code_ogr', 'skill_name', 'skill_is_practical']]
def rome_job_groups(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""A list of all job groups in ROME with their names.
The only column is "name" and the index is the ROME code. Each row
represents a job group clustering multiple professions.
"""
if not filename:
filename = path.join(
data_folder, f'rome/csv/unix_referentiel_code_rome_{_ROME_VERSION}_utf8.csv')
job_groups = pandas.read_csv(filename)
# Fix names that contain double '.
job_groups['name'] = job_groups['libelle_rome'].str.replace("''", "'", regex=False)\
.apply(maybe_add_accents)
job_groups.set_index('code_rome', inplace=True)
return job_groups[['name']]
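# Illustrative sketch of how the table functions in this module are meant to be
# used (the ROME code 'A1101' below is only a hypothetical example, not checked
# against the actual file):
#
#     job_groups = rome_job_groups(data_folder='data')
#     job_groups.loc['A1101', 'name']  # -> the cleaned name of that job group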
def rome_holland_codes(data_folder: str = 'data', filename: Optional[str] = None) \
-> pandas.DataFrame:
"""A list of all job groups in ROME with their Holland Codes.
The only columns are the "major" and the "minor" Holland Code and the index
is the ROME code. Each row represents a job group clustering multiple
professions.
"""
if not filename:
filename = f'rome/csv/unix_referentiel_code_rome_riasec_{_ROME_VERSION}_utf8.csv'
filename = path.join(data_folder, filename)
column_names = ['code_rome', 'major', 'minor']
holland_codes = pandas.read_csv(filename, names=column_names)
holland_codes.major.fillna('', inplace=True)
holland_codes.minor.fillna('', inplace=True)
return holland_codes.set_index('code_rome')
def rome_texts(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""A list of all job groups in ROME with some lengthy text definitions.
The columns are "definition", "requirements" and "working_environment".
Each row represents a job group clustering multiple professions.
"""
if not filename:
filename = path.join(data_folder, f'rome/csv/unix_texte_{_ROME_VERSION}_utf8.csv')
texts = pandas.read_csv(filename).pivot_table(
index='code_rome',
columns='libelle_type_texte',
values='libelle_texte',
aggfunc=lambda x: '\n\n'.join(x).replace("''", "'"))
return texts.rename(columns={
'acces_a_em': 'requirements',
'cond_exercice_activite': 'working_environment',
})
def rome_work_environments(
data_folder: str = 'data', links_filename: Optional[str] = None,
ref_filename: Optional[str] = None) -> pandas.DataFrame:
"""A list of all work environment of job groups in ROME.
The columns are "code_rome", "code_ogr" (a unique ID for a work environment
item), "name", "section" (one of STRUCTURES, CONDITIONS and SECTEURS).
"""
if not links_filename:
links_filename = path.join(
data_folder, f'rome/csv/unix_liens_rome_referentiels_{_ROME_VERSION}_utf8.csv')
if not ref_filename:
ref_filename = path.join(
data_folder, f'rome/csv/unix_referentiel_env_travail_{_ROME_VERSION}_utf8.csv')
links = pandas.read_csv(links_filename)
ref = pandas.read_csv(ref_filename)
environments = pandas.merge(links, ref, on='code_ogr', how='inner')
environments['name'] = environments.libelle_env_travail.str.replace("''", "'", regex=False)\
.apply(maybe_add_accents)
return environments.rename(columns={
'libelle_type_section_env_trav': 'section',
})[['name', 'code_ogr', 'code_rome', 'section']]
def rome_jobs(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""A list of all jobs in ROME with their names and their groups.
The columns are "name" and "code_rome" and the index is the OGR code. Each
row represents a profession.
"""
if not filename:
filename = path.join(
data_folder, f'rome/csv/unix_referentiel_appellation_{_ROME_VERSION}_utf8.csv')
jobs = pandas.read_csv(filename, dtype=str)
# Fix names that contain double '.
jobs['name'] = jobs['libelle_appellation_court'].str.replace("''", "'", regex=False)\
.apply(maybe_add_accents)
jobs.set_index('code_ogr', inplace=True)
return jobs[['name', 'code_rome']]
def rome_job_groups_mobility(
data_folder: str = 'data', filename: Optional[str] = None, expand_jobs: bool = False) \
-> pandas.DataFrame:
"""A list of oriented edges in the ROME mobility graph.
The expand_jobs parameter defines what to do with edges going from or to
jobs directly instead of job groups: if True, the function expands the edge
    to cover the whole job group even if only one job was specified; if
False, the function ignores such edges.
The columns are "source_rome_id", "source_name", "target_rome_id",
"target_name", "mobility_type".
"""
if not filename:
filename = path.join(
data_folder, f'rome/csv/unix_rubrique_mobilite_{_ROME_VERSION}_utf8.csv')
mobility = pandas.read_csv(filename, dtype=str)
mobility.rename(columns={
'code_rome': 'source_rome_id',
'code_rome_cible': 'target_rome_id',
}, inplace=True)
# Expand or ignore job edges.
if expand_jobs:
mobility = mobility.drop_duplicates(subset=['source_rome_id', 'target_rome_id'])
else:
mobility = mobility[
mobility.code_appellation_source.isnull() & mobility.code_appellation_cible.isnull()]
# Add names.
rome_job_group_names = rome_job_groups(data_folder=data_folder).name
mobility['source_rome_name'] = mobility.source_rome_id.map(rome_job_group_names)
mobility['target_rome_name'] = mobility.target_rome_id.map(rome_job_group_names)
# Translate mobility type.
mobility['mobility_type'] = mobility.libelle_type_mobilite.map({
'Proche': 'CLOSE',
'Evolution': 'EVOLUTION',
})
return mobility[[
'source_rome_id',
'source_rome_name',
'target_rome_id',
'target_rome_name',
'mobility_type',
]]
def rome_fap_mapping(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""Mapping from ROME ID to FAP codes.
The index are the ROME IDs and the only column "fap_codes" is a list of
corresponding FAP codes.
"""
if not filename:
filename = path.join(data_folder, 'crosswalks/passage_fap2009_romev3.txt')
with codecs.open(filename, 'r', 'latin-1') as fap_file:
mapping: Dict[str, Set[str]] = collections.defaultdict(set)
for line in fap_file:
matches = re.search(r'"(.*?)"+\s+=\s+"(.*?)"', line)
if not matches:
continue
qualified_romes = matches.groups()[0]
fap = matches.groups()[1]
for qualified_rome in qualified_romes.replace('"', '').split(','):
rome_id = qualified_rome[:5]
mapping[rome_id].add(fap)
return pandas.Series(mapping, name='fap_codes').to_frame()
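# A minimal sketch of the parsing above. Assuming a crosswalk line shaped like
#     "A1101","A1203" = "A0Z40"
# (hypothetical content), the regex captures the left and right quoted parts,
# the inner quotes are stripped, each qualified ROME code is truncated to its
# first 5 characters and 'A0Z40' is added to the FAP sets of both job groups.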
def rome_isco08_mapping(
data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""Mapping from ROME ID to ISCO 08 codes.
The index are the ROME IDs and the only column "isco08_code" is the corresponding ISCO code.
"""
if not filename:
filename = path.join(data_folder, 'crosswalks/Correspondance_ROME_ISCO08.xlsx')
rome_to_isco_file = pandas.ExcelFile(filename, engine='openpyxl')
mapping = rome_to_isco_file.parse('ROME to ISCO-08', dtype='str')
mapping.rename(
{'Code ISCO08': 'isco08_code', 'Code ROME': 'rome_id'}, axis='columns', inplace=True)
mapping.dropna(subset=['rome_id'], inplace=True)
return mapping.set_index('rome_id')[['isco08_code']]
def naf_subclasses(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""NAF Sub classes.
The index are the IDs of the sub classes (e.g. "0111Z"), and the only
column is "name".
"""
if not filename:
filename = path.join(data_folder, 'naf-2008.xls')
naf_2008 = pandas.read_excel(filename)
naf_2008 = naf_2008.iloc[2:, :]
naf_2008.columns = ['code', 'name']
naf_2008['code'] = naf_2008.code.str.replace('.', '', regex=False)
return naf_2008.set_index('code')
def french_departements(
data_folder: str = 'data', filename: Optional[str] = None,
oversea_filename: Optional[str] = None,
prefix_filename: Optional[str] = None) -> pandas.DataFrame:
"""French départements.
The index are the IDs of the départements, and the columns are "name",
"region_id" and "prefix".
"""
if not filename:
filename = path.join(data_folder, 'geo/insee_france_departements.tsv')
if not oversea_filename:
oversea_filename = path.join(data_folder, 'geo/insee_france_oversee_collectivities.tsv')
if not prefix_filename:
prefix_filename = path.join(data_folder, 'geo/departement_prefix.tsv')
departements = pandas.concat([
pandas.read_csv(filename, sep='\t', dtype=str),
pandas.read_csv(oversea_filename, sep='\t', dtype=str)])
prefixes = pandas.read_csv(prefix_filename, sep='\t', dtype=str).set_index('DEP')
departements.rename(
columns={
'REGION': 'region_id',
'DEP': 'departement_id',
'NCCENR': 'name',
},
inplace=True)
departements.set_index('departement_id', inplace=True)
departements['prefix'] = prefixes.PREFIX
departements.prefix.fillna('', inplace=True)
return departements[['name', 'region_id', 'prefix']]
def french_regions(
data_folder: str = 'data', filename: Optional[str] = None,
prefix_filename: Optional[str] = None) -> pandas.DataFrame:
"""French régions (on January 1st, 2017).
    The index are the IDs of the régions, and the columns are "name" and "prefix".
"""
if not filename:
filename = path.join(data_folder, 'geo/insee_france_regions.tsv')
if not prefix_filename:
prefix_filename = path.join(data_folder, 'geo/region_prefix.tsv')
regions = pandas.read_csv(filename, sep='\t', dtype=str)
regions.rename(
columns={'REGION': 'region_id', 'NCCENR': 'name'}, inplace=True)
prefixes = pandas.read_csv(prefix_filename, sep='\t', dtype=str).set_index('REGION')
regions.set_index('region_id', inplace=True)
regions['prefix'] = prefixes.PREFIX
regions.prefix.fillna('', inplace=True)
return regions[['name', 'prefix']]
def french_cities(
data_folder: str = 'data', filename: Optional[str] = None, unique: bool = False) \
-> pandas.DataFrame:
"""French cities (all the ones that have existed until January 1st, 2016).
The index are the IDs (Code Officiel Géographique) of the cities, and the
columns are "name", "departement_id", "region_id", "current" (whether the
city is still a city on 2016-01-01) and "current_city_id" (for cities that
have been merged, the ID of the merged city).
"""
if not filename:
filename = path.join(data_folder, 'geo/insee_france_cities.tsv')
cities = pandas.read_csv(filename, sep='\t', dtype=str)
cities['city_id'] = cities.DEP + cities.COM
if unique:
# Drop duplicate indices: french cities table has sometimes multiple
# rows for the same ID, as a city can change name.
cities.drop_duplicates('city_id', inplace=True)
cities.set_index('city_id', inplace=True)
cities.rename(columns={
'NCCENR': 'name',
'ARTMIN': 'prefix',
'DEP': 'departement_id',
'REG': 'region_id',
'POLE': 'current_city_id',
}, inplace=True)
cities['current'] = cities.ACTUAL == '1'
cities['arrondissement'] = cities.ACTUAL == '5'
cities.prefix.fillna('', inplace=True)
cities.prefix = cities.prefix.str[1:-1]
cities['separator'] = cities.prefix.map(
lambda prefix: '' if not prefix or prefix.endswith("'") else ' ')
cities.name = cities.prefix + cities.separator + cities.name
return cities[[
'name', 'departement_id', 'region_id', 'current', 'current_city_id', 'arrondissement']]
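# A hedged example of the prefix/separator logic above, assuming the raw
# "ARTMIN" field wraps the article in parentheses (e.g. "(Le)" or "(L')"):
# "(Le)" becomes prefix "Le" with separator " ", giving names like "Le Havre",
# while "(L')" becomes prefix "L'" with an empty separator, giving "L'<Name>".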
def french_urban_areas(data_folder: str = 'data', filename: Optional[str] = None) \
-> pandas.DataFrame:
"""French urban entities.
The index are the IDs (Code Officiel Geographique) of the cities, and the columns are
- "AU2010": ID of the urban area it's part of, except when it is not part of any.
- "periurban": a mode:
- 1: rural, not being part of any urban nor periurban area.
- 2: periurban, more than 40% of inhabitants work in one or several urban areas.
        - 3: urban, part of an urban area with more than 10k jobs.
"""
if not filename:
filename = path.join(data_folder, 'geo/french_urban_areas.xls')
cities = pandas.read_excel(
filename,
sheet_name='Composition_communale',
skiprows=5,
index_col=0)
cities['periurban'] = cities.CATAEU2010.map({
111: 3,
112: 2,
120: 2,
}).fillna(1).astype(int)
return cities[['AU2010', 'periurban']]
def french_urban_entities(data_folder: str = 'data', filename: Optional[str] = None) \
-> pandas.DataFrame:
"""French urban entities.
The index are the IDs (Code Officiel Geographique) of the cities, and the columns are
- "UU2010": ID of the urban entity it's part of, except for rural where
it's an ID grouping all rural cities in the département.
- "urban": a score for how large the urban entity is
- 0: <2k (rural)
- 1: <5k
- 2: <10k
- 3: <20k
- 4: <50k
- 5: <100k
- 6: <200k
- 7: <2M
- 8: Paris
"""
if not filename:
filename = path.join(data_folder, 'geo/french_urban_entities.xls')
sheets = pandas.read_excel(
filename,
sheet_name=['UU2010', 'Composition_communale'],
skiprows=5,
index_col=0)
entities = pandas.merge(
sheets['Composition_communale'], sheets['UU2010'], how='left',
left_on='UU2010', right_index=True)
entities['urban'] = entities.TUU2015.fillna(0).astype(int)
return entities[['UU2010', 'urban']]
def scraped_imt(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""IMT - Information sur le Marché du Travail.
This is information on the labor market scraped from the Pôle emploi website.
The table is indexed by "departement_id" and "rome_id" and contains the
fields equivalent to the ImtLocalJobStats protobuf in camelCase.
"""
if not filename:
filename = path.join(data_folder, 'scraped_imt_local_job_stats.json')
imt = pandas.read_json(filename, orient='records')
imt['departement_id'] = imt.city.apply(lambda c: c['departementId'])
imt['rome_id'] = imt.job.apply(lambda j: j['jobGroup']['romeId'])
return imt.set_index(['departement_id', 'rome_id'])
def transport_scores(data_folder: str = 'data', filename: Optional[str] = None) -> Dict[str, float]:
    """Public transportation scores by city ID, scraped from ville-ideale.fr."""
if not filename:
filename = path.join(data_folder, 'geo/ville-ideale-transports.html')
with open(filename, 'rt') as transport_file:
page_text = transport_file.read()
page_selector = selector.Selector(text=page_text)
# Parse the links containing city name and city ID.
city_ids = (
link.split('_')[-1]
for link in page_selector.xpath('//td[@class="ville"]/a/@href').extract())
# Parse the scores.
scores = (
float(note.replace(',', '.'))
for note in page_selector.xpath('//td[@class="note"]/text()').extract())
return {city_id: score if score >= .1 else .1 for city_id, score in zip(city_ids, scores)}
# Regular expression to match unaccented capital E in French text that should
# be capitalized. It has been computed empirically by testing on the full ROME.
# It matches the E in "Etat", "Ecrivain", "Evolution", but not in "Entreprise",
# "Ethnologue" nor "Euro".
_UNACCENTED_E_MATCH = re.compile(
r'E(?=('
'([bcdfghjklpqrstvz]|[cpt][hlr])[aeiouyéèêë]|'
'n([eouyéèêë]|i[^v]|a[^m])|'
'm([aeiuyéèêë]|o[^j])))')
def maybe_add_accents(title: str) -> str:
"""Add an accent on capitalized letters if needed.
In the ROME, most of the capitalized letters have no accent even if the
French word would require one. This function fixes this by using
heuristics.
"""
return _UNACCENTED_E_MATCH.sub('É', title)
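# Quick illustrative check of the heuristic above (per the examples documented
# with _UNACCENTED_E_MATCH; not an exhaustive test):
#
#     maybe_add_accents('Etat')        # -> 'État'
#     maybe_add_accents('Entreprise')  # -> 'Entreprise' (unchanged)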
def _merge_hard_skills(
skill_ids: Union[float, List[str]],
activitie_ids: Union[float, List[str]]) -> List[str]:
"""Merging skill and activity ids."""
skill_ids = skill_ids if isinstance(skill_ids, list) else []
activitie_ids = activitie_ids if isinstance(activitie_ids, list) else []
return skill_ids + activitie_ids
def job_offers_skills(
data_folder: str = 'data', job_offers_filename: Optional[str] = None,
skills_filename: Optional[str] = None,
activities_filename: Optional[str] = None) -> pandas.DataFrame:
"""Job offers gathered and provided by Pôle emploi and their unwinded skills.
Each row represents a skill required for a specific job offer. Columns are:
the job offer original index, the job group rome code, the job group name and
the skill ID (ogr code).
"""
if not job_offers_filename:
job_offers_filename = path.join(data_folder, 'job_offers/recent_job_offers.csv')
offers = pandas.read_csv(
job_offers_filename,
dtype={'POSTCODE': str, 'ROME_LIST_SKILL_CODE': str, 'ROME_LIST_ACTIVITY_CODE': str},
parse_dates=['CREATION_DATE', 'MODIFICATION_DATE'],
dayfirst=True, infer_datetime_format=True, low_memory=False)
if not skills_filename:
skills_filename = path.join(
data_folder, f'rome/csv/unix_referentiel_competence_{_ROME_VERSION}_utf8.csv')
skills = pandas.read_csv(skills_filename)
skills.set_index('code_ogr', inplace=True)
if not activities_filename:
activities_filename = path.join(
data_folder, f'rome/csv/unix_referentiel_activite_{_ROME_VERSION}_utf8.csv')
activities = pandas.read_csv(activities_filename)
activities.set_index('code_ogr', inplace=True)
# Cleaning columns.
offers.columns = offers.columns.str.lower()
offers['skill_ids'] = offers.rome_list_skill_code.str.split(';')
offers['activitie_ids'] = offers.rome_list_activity_code.str.split(';')
offers['all_skill_ids'] = offers.apply(
lambda row: _merge_hard_skills(row.skill_ids, row.activitie_ids), axis=1)
# Getting skills per job group.
offers = offers.reset_index().rename(index=str, columns={'index': 'offer_num'})
skills_per_job_offer = []
offers.apply(
lambda row: skills_per_job_offer.extend([
[
row.offer_num, row.rome_profession_card_code,
row.rome_profession_name, int(skill_id)
]
for skill_id in row.all_skill_ids]),
axis=1)
unwind_offers_skills = pandas.DataFrame(
skills_per_job_offer, columns=[
'offer_num', 'rome_profession_card_code', 'rome_profession_card_name', 'code_ogr'])
# Fix skill names that contain double '.
unwind_offers_skills['skill_name'] = unwind_offers_skills.code_ogr\
.map(skills.libelle_competence.str.replace("''", "'", regex=False))
unwind_offers_skills['activity_name'] = unwind_offers_skills.code_ogr\
.map(activities.libelle_activite.str.replace("''", "'", regex=False))
unwind_offers_skills['skill_activity_name'] = unwind_offers_skills.skill_name\
.combine_first(unwind_offers_skills.activity_name)
return unwind_offers_skills
def market_scores(data_folder: str = 'data', filename: Optional[str] = None) -> pandas.DataFrame:
"""Market score at the departement level gathered and provided by Pôle emploi.
    Each row represents a market (job group x departement). The index is
    (rome_id, departement_id) and the columns are the market score, the yearly
    average number of offers per 10 candidates, the yearly average offers
    denominator and the area level of the market score (D is for departement,
    R for region, etc.).
"""
if not filename:
filename = path.join(data_folder, 'imt/market_score.csv')
market_stats = pandas.read_csv(filename, dtype={'AREA_CODE': 'str'})
market_stats['departement_id'] = market_stats.AREA_CODE
market_stats['market_score'] = market_stats.TENSION_RATIO.div(_YEARLY_AVG_OFFERS_DENOMINATOR)
market_stats['yearly_avg_offers_per_10_candidates'] = market_stats.TENSION_RATIO
market_stats['rome_id'] = market_stats.ROME_PROFESSION_CARD_CODE
market_stats['yearly_avg_offers_denominator'] = _YEARLY_AVG_OFFERS_DENOMINATOR
market_stats = market_stats.set_index(['rome_id', 'departement_id'])
market_stats.dropna(subset=['market_score'], inplace=True)
return market_stats[[
'market_score', 'yearly_avg_offers_per_10_candidates',
'yearly_avg_offers_denominator', 'AREA_TYPE_CODE'
]]
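# Worked example of the computation above: a TENSION_RATIO of 5 means 5 yearly
# offers for 10 candidates, so market_score = 5 / 10 = 0.5 while
# yearly_avg_offers_per_10_candidates stays at 5.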
def imt_salaries(
data_folder: str = 'data', filename: Optional[str] = None,
pcs_crosswalk_filename: Optional[str] = None) -> pandas.DataFrame:
"""Salary information from French IMT.
    Each row is for a market (département x job group) and contains four columns:
    junior_min_salary, junior_max_salary, senior_min_salary and senior_max_salary.
    Those salaries are gross salaries in € per month.
"""
if not filename:
filename = path.join(data_folder, 'imt/salaries.csv')
if not pcs_crosswalk_filename:
pcs_crosswalk_filename = path.join(data_folder, 'crosswalks/passage_pcs_romev3.csv')
pcs_rome = pandas.read_csv(pcs_crosswalk_filename)
salaries = pandas.read_csv(filename, dtype={'AREA_CODE': 'str'})
salaries_dept = salaries[
(salaries.AREA_TYPE_CODE == 'D') & (salaries.MINIMUM_SALARY > 0)]
salaries_dept = salaries_dept\
.merge(pcs_rome, how='inner', left_on='PCS_PROFESSION_CODE', right_on='PCS')\
.rename({'ROME': 'rome_id', 'AREA_CODE': 'departement_id'}, axis='columns')
def _group_salary_per_seniority(salaries: pandas.DataFrame) -> Any:
junior_salaries = salaries[salaries.AGE_GROUP_CODE == 1]
senior_salaries = salaries[salaries.AGE_GROUP_CODE == 2]
return pandas.Series({
'junior_min_salary': junior_salaries.MINIMUM_SALARY.min(),
'junior_max_salary': junior_salaries.MAXIMUM_SALARY.max(),
'senior_min_salary': senior_salaries.MINIMUM_SALARY.min(),
'senior_max_salary': senior_salaries.MAXIMUM_SALARY.max(),
})
return salaries_dept.groupby(['departement_id', 'rome_id'])\
.apply(_group_salary_per_seniority)
def jobs_without_qualifications(data_folder: str = 'data', filename: Optional[str] = None) \
-> pandas.DataFrame:
"""Job groups that don't require any qualifications to get hired (training nor experience).
The indices are the ROME job group IDs.
Each row is for a ROME job group that does not require any qualifications to get hired and
    contains a single column "no_requirements" whose value is always True.
"""
# The strategy for filtering jobs without qualification is described here:
# https://github.com/bayesimpact/bob-emploi-internal/blob/master/data_analysis/notebooks/research/jobbing/jobs_without_qualifications.ipynb
if not filename:
filename = path.join(
data_folder, f'rome/csv/unix_item_arborescence_{_ROME_VERSION}_utf8.csv')
rome_item_arborescence_data = pandas.read_csv(filename)
unqualification_jobs_index = '017'
first_level = rome_item_arborescence_data[
rome_item_arborescence_data.code_pere == unqualification_jobs_index]
second_level = rome_item_arborescence_data[
rome_item_arborescence_data.code_pere.isin(first_level.code_noeud.str[:3])]
return second_level.set_index('code_noeud')\
.rename_axis('rome_id').code_pere.rename('no_requirements')\
.apply(lambda unused: True).to_frame()
_APPLICATION_MODE_PROTO_FIELDS = {
'R1': 'PLACEMENT_AGENCY',
'R2': 'PERSONAL_OR_PROFESSIONAL_CONTACTS',
'R3': 'SPONTANEOUS_APPLICATION',
'R4': 'OTHER_CHANNELS',
}
def _get_app_modes_perc(fap_modes: pandas.DataFrame) -> Dict[str, Any]:
return {
'modes': [
{'mode': _APPLICATION_MODE_PROTO_FIELDS[row.APPLICATION_TYPE_CODE],
'percentage': row.RECRUT_PERCENT}
for row in fap_modes.itertuples()]}
def fap_application_modes(data_folder: str = 'data', filename: Optional[str] = None) \
-> pandas.DataFrame:
"""Application modes per FAP.
Series indexed by FAP codes (e.g. A0Z00, W0Z91), the content is the Dict version of a
RecruitingModesDistribution proto.
"""
if not filename:
filename = path.join(data_folder, 'imt/application_modes.csv')
modes = pandas.read_csv(filename)
return modes.sort_values('RECRUT_PERCENT', ascending=False).\
groupby('FAP_CODE').apply(_get_app_modes_perc)
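# Sketch of the shape of the returned series, assuming the CSV has FAP_CODE,
# APPLICATION_TYPE_CODE and RECRUT_PERCENT columns (as used above): indexed by
# FAP code, each value is a dict like
#     {'modes': [{'mode': 'SPONTANEOUS_APPLICATION', 'percentage': 42.0}, ...]}
# (hypothetical numbers), with modes sorted by decreasing percentage.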
| gpl-3.0 |
laiy/Database_Project | third_party/nltk/probability.py | 8 | 83570 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (additions)
# Trevor Cohn <[email protected]> (additions)
# Peter Ljunglöf <[email protected]> (additions)
# Liang Dong <[email protected]> (additions)
# Geoffrey Sampson <[email protected]> (additions)
# Ilia Kurenkov <[email protected]> (additions)
#
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
from __future__ import print_function, unicode_literals
import math
import random
import warnings
import array
from operator import itemgetter
from collections import defaultdict
from functools import reduce
from nltk import compat
from nltk.compat import Counter
from nltk.internals import raise_unorderable_types
_NINF = float('-1e300')
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class FreqDist(Counter):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist[word.lower()] += 1
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
Counter.__init__(self, samples)
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return sum(self.values())
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
return self.r_Nr(bins)[r]
def r_Nr(self, bins=None):
"""
Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
        :rtype: dict(int, int)
"""
_r_Nr = defaultdict(int)
for count in self.values():
_r_Nr[count] += 1
# Special case for Nr[0]:
_r_Nr[0] = bins - self.B() if bins is not None else 0
return _r_Nr
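    # Small illustrative example (not from the NLTK docs): for
    # FreqDist('aabbbc') the counts are {'a': 2, 'b': 3, 'c': 1}, so
    # r_Nr() maps 1 -> 1, 2 -> 1, 3 -> 1, and Nr(0) is 0 when bins is not given.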
def _cumulative_frequencies(self, samples):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type samples: any
:rtype: list(float)
"""
cf = 0.0
for sample in samples:
cf += self[sample]
yield cf
    # Slightly odd nomenclature: FreqDist stores counts and ProbDist stores probs,
    # yet freq() here returns a probability (a relative frequency).
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self.N() == 0:
return 0
return float(self[sample]) / self.N()
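    # For example (an illustrative sketch, not an official doctest):
    # FreqDist('abbb').freq('b') == 0.75, since 'b' accounts for 3 of the
    # 4 recorded outcomes.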
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, return None.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
return self.most_common(1)[0][0]
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type title: bool
"""
try:
from matplotlib import pylab
except ImportError:
            raise ValueError('The plot function requires matplotlib to be installed. '
                             'See http://matplotlib.org/')
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution (cumulative),
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
        tabulated.
:param samples: The samples to plot (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = [item for item, _ in self.most_common(*args)]
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print("%4s" % samples[i], end=' ')
print()
for i in range(len(samples)):
print("%4d" % freqs[i], end=' ')
print()
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def __le__(self, other):
if not isinstance(other, FreqDist):
raise_unorderable_types("<=", self, other)
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
# @total_ordering doesn't work here, since the class inherits from a builtin class
__ge__ = lambda self, other: not self <= other or self == other
__lt__ = lambda self, other: self <= other and not self == other
__gt__ = lambda self, other: not self <= other
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return self.pformat()
def pprint(self, maxlen=10, stream=None):
"""
Print a string representation of this FreqDist to 'stream'
:param maxlen: The maximum number of items to print
:type maxlen: int
:param stream: The stream to print to. stdout by default
"""
print(self.pformat(maxlen=maxlen), file=stream)
def pformat(self, maxlen=10):
"""
Return a string representation of this FreqDist.
:param maxlen: The maximum number of items to display
:type maxlen: int
:rtype: string
"""
items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)]
if len(self) > maxlen:
items.append('...')
return 'FreqDist({{{0}}})'.format(', '.join(items))
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
return (math.log(p, 2) if p != 0 else _NINF)
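    # For instance (illustrative): a sample with prob() == 0.25 has
    # logprob() == -2.0, since log2(0.25) = -2.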
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
p_init = p
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, p_init-p))
return random.choice(list(self.samples()))
@compat.python_2_unicode_compatible
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
return (self._prob if sample in self._sampleset else 0)
def max(self):
return self._samples[0]
def samples(self):
return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
@compat.python_2_unicode_compatible
class RandomProbDist(ProbDistI):
"""
Generates a random probability distribution whereby each sample
    will be between 0 and 1 with equal probability (a uniform random
    distribution, also called a continuous uniform distribution).
"""
def __init__(self, samples):
if len(samples) == 0:
raise ValueError('A probability distribution must '+
'have at least one sample.')
self._probs = self.unirand(samples)
self._samples = list(self._probs.keys())
@classmethod
def unirand(cls, samples):
"""
The key function that creates a randomized initial distribution
that still sums to 1. Set as a dictionary of prob values so that
it can still be passed to MutableProbDist and called with identical
syntax to UniformProbDist
"""
randrow = [random.random() for i in range(len(samples))]
total = sum(randrow)
for i, x in enumerate(randrow):
randrow[i] = x/total
total = sum(randrow)
if total != 1:
                # this difference, if present, is a tiny rounding error that
                # can be subtracted from any element without pushing any
                # probability outside (0, 1)
randrow[-1] -= total - 1
return dict((s, randrow[i]) for i, s in enumerate(samples))
def prob(self, sample):
return self._probs.get(sample, 0)
def samples(self):
return self._samples
def __repr__(self):
return '<RandomUniformProbDist with %d samples>' %len(self._probs)
@compat.python_2_unicode_compatible
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
self._prob_dict = (prob_dict.copy() if prob_dict is not None else {})
self._log = log
# Normalize the distribution, if requested.
if normalize:
if len(prob_dict) == 0:
raise ValueError('A DictionaryProbDist must have at least one sample ' +
'before it can be normalized.')
if log:
value_sum = sum_logs(list(self._prob_dict.values()))
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
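    # Illustrative sketch (not from the NLTK docs): with normalize=True,
    # DictionaryProbDist({'a': 1, 'b': 3}, normalize=True) scales the values
    # by 1/4, so prob('a') == 0.25 and prob('b') == 0.75.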
def prob(self, sample):
if self._log:
return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0)
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
@compat.python_2_unicode_compatible
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is parameterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
    ``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to parameterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None:
bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
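    # Worked example of the estimates above (illustrative, with gamma == 1,
    # i.e. the Laplace case): for FreqDist('aab') we have N == 3 and B == 2
    # bins, so prob('a') == (2+1)/(3+2) == 0.6, prob('b') == (1+1)/(3+2) == 0.4
    # and discount() == (2*1)/(3+2*1) == 0.4.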
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
@compat.python_2_unicode_compatible
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
r_Nr = base_fdist.r_Nr(bins)
Nr = [r_Nr[r] for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*.
In the special case that *N[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
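    # Illustrative example of the formula above: if samples occurring twice in
    # the base distribution total Tr[2] == 6.0 in the heldout distribution,
    # with Nr[2] == 3 such samples and N == 100 heldout outcomes, then
    # estimate[2] == 6.0 / (3 * 100) == 0.02.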
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
@compat.python_2_unicode_compatible
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distribution.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([list(fd) for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
@compat.python_2_unicode_compatible
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
    - *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
        - *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalizing factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()' % freqdist.B()
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
return (c / float(self._N + self._T) if c != 0 else self._P0)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
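# A small worked example of the estimates above (illustrative only, using
# character counts from the string 'abracadabra'):
#
#     fd = FreqDist('abracadabra')          # N = 11 tokens, T = B = 5 types
#     wb = WittenBellProbDist(fd, bins=10)  # Z = 10 - 5 = 5 unseen types
#     wb.prob('a')   # seen:   c / (N + T)       = 5 / 16       = 0.3125
#     wb.prob('z')   # unseen: T / (Z * (N + T)) = 5 / (5 * 16) = 0.0625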
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# World War II. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# The Good-Turing method calculates the probability mass to assign to
# events with zero or low counts based on the number of events with
# higher counts. It does so by using the adjusted count *c\**:
#
# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
# - *things with frequency zero in training* = N(1) for c == 0
#
# where *c* is the original count, *N(i)* is the number of event types
# observed with count *i*. We can treat the count of unseen events as the
# count of events with frequency one (see Jurafsky & Martin, 2nd Edition, p. 101).
#
# This method is problematic because the situation ``N(c+1) == 0``
# is quite common in the original Good-Turing estimation; smoothing or
# interpolation of *N(i)* values is essential in practice.
#
# Bill Gale and Geoffrey Sampson present a simple and effective approach,
# Simple Good-Turing. As a smoothing curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b using simple linear regression on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use the unsmoothed (Turing) estimate as long
# as the difference between the unsmoothed and smoothed values of r* is
# greater than 1.96 times its standard deviation, and to switch to the
# smoothed estimate once the difference is less than or equal to that:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion;
# some implementations use a coefficient of 1.65 for a 0.1
# significance criterion.
#
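# A tiny worked example of the adjusted count (hypothetical counts): if the
# training data contains N(1) = 3 event types seen once and N(2) = 1 event
# type seen twice, the adjusted count for r = 1 is
#
#     c* = (1 + 1) * N(2) / N(1) = 2 * 1 / 3 = 0.67 (approximately)
#
# i.e. singletons are discounted, and the mass removed from them is what is
# reassigned to the unseen events.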
##//////////////////////////////////////////////////////
## Simple Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class SimpleGoodTuringProbDist(ProbDistI):
"""
SimpleGoodTuring ProbDist approximates the relationship between frequency and
frequency of frequency by a straight line in log space, fitted by linear
regression. Details of the Simple Good-Turing algorithm can be found in:
- "Good-Turing frequency estimation without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
Given a set of pairs (xi, yi), where xi denotes the frequency and
yi denotes the frequency of frequency, we want to minimize the
squared deviation. E(x) and E(y) represent the means of xi and yi.
- slope: b = sigma((xi-E(x))(yi-E(y))) / sigma((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b.E(x)
"""
SUM_TO_ONE = False
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist.B() + 1``
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1)
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr_non_zero(self):
r_Nr = self._freqdist.r_Nr()
del r_Nr[0]
return r_Nr
def _r_Nr(self):
"""
Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
nonzero = self._r_Nr_non_zero()
if not nonzero:
return [], []
return zip(*sorted(nonzero.items()))
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
# For higher sample frequencies the data points become horizontal
# along the line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
i = (r[j-1] if j > 0 else 0)
k = (2 * r[j] - i if j == len(r) - 1 else r[j+1])
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
self._slope = (xy_cov / x_var if x_var != 0 else 0.0)
if self._slope >= -1:
warnings.warn('SimpleGoodTuring did not find a proper best fit '
'line for smoothing probabilities of occurrences. '
'The probability estimates are likely to be '
'unreliable.')
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
Return the smoothed estimate of the number of sample types with count r.
:param r: The count (frequency) whose smoothed Nr estimate is required.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
prob_sum = 0.0
# seen mass: renormalized estimates summed over all non-zero counts
for r_, nr_ in self._r_Nr_non_zero().items():
prob_sum += nr_ * self._prob_measure(r_) * self._renormal
# unseen mass reserved for events with count zero
prob_sum += self._prob_measure(0)
print("Probability Sum:", prob_sum)
# assert abs(prob_sum - 1.0) < 1e-6, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
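# A minimal usage sketch for SimpleGoodTuringProbDist (illustrative only;
# the log-log regression needs a reasonably large frequency distribution,
# and ``tokens`` here stands for some hypothetical corpus):
#
#     fd = FreqDist(tokens)
#     sgt = SimpleGoodTuringProbDist(fd)
#     sgt.prob(fd.max())          # estimate for the most frequent sample
#     sgt.prob('never-seen')      # share of the mass reserved for unseen events
#     sgt.discount()              # total mass transferred to unseen events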
class MutableProbDist(ProbDistI):
"""
A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = array.array(str("d"), [0.0]) * len(samples)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return 0.0
return (2**(self._data[i]) if self._logs else self._data[i])
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is None:
return float('-inf')
return (self._data[i] if self._logs else math.log(self._data[i], 2))
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being a valid probability distribution; the user must
ensure that the sample probabilities are updated such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: whether ``prob`` is already given as a base-2 log probability
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
self._data[i] = (prob if log else math.log(prob, 2))
else:
self._data[i] = (2**(prob) if log else prob)
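# A minimal usage sketch (illustrative only): copy an MLE estimate and then
# overwrite the stored probabilities in place.
#
#     mle = MLEProbDist(FreqDist(['a', 'a', 'b']))
#     mpd = MutableProbDist(mle, ['a', 'b'])
#     mpd.update('a', 0.5, log=False)
#     mpd.update('b', 0.5, log=False)
#     mpd.prob('a')    # -> 0.5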
##/////////////////////////////////////////////////////
## Kneser-Ney Probability Distribution
##//////////////////////////////////////////////////////
# This method for calculating probabilities was introduced in 1995 by Reinhard
# Kneser and Hermann Ney. It was meant to improve the accuracy of language
# models that use backing-off to deal with sparse data. The authors propose two
# ways of doing so: a marginal distribution constraint on the back-off
# distribution and a leave-one-out distribution. For a start, the first one is
# implemented as a class below.
#
# The idea behind a back-off n-gram model is that we have a series of
# frequency distributions for our n-grams so that in case we have not seen a
# given n-gram during training (and as a result have a 0 probability for it) we
# can 'back off' (hence the name!) and try testing whether we've seen the
# n-1-gram part of the n-gram in training.
#
# The novelty of Kneser and Ney's approach was that they decided to fiddle
# around with the way this latter, backed off probability was being calculated
# whereas their peers seemed to focus on the primary probability.
#
# The implementation below uses one of the techniques described in their paper
# titled "Improved backing-off for n-gram language modeling." In the same paper
# another technique is introduced to attempt to smooth the back-off
# distribution as well as the primary one. There is also a much-cited
# modification of this method proposed by Chen and Goodman.
#
# In order for the implementation of Kneser-Ney to be more efficient, some
# changes have been made to the original algorithm. Namely, the calculation of
# the normalizing function gamma has been significantly simplified and
# combined slightly differently with beta. None of these changes affect the
# nature of the algorithm, but instead aim to cut out unnecessary calculations
# and take advantage of storing and retrieving information in dictionaries
# where possible.
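# A tiny worked example of the discounted estimate used below (hypothetical
# counts, discount D = 0.75): if the trigram (w0, w1, w2) was seen 3 times
# and the bigram (w0, w1) was seen 4 times, the primary estimate is
#
#     p = (3 - 0.75) / 4 = 0.5625
#
# and the 0.75 of a count removed here is the mass that the back-off term
# redistributes over trigrams sharing the (w0, w1) context that were never
# observed in training.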
@compat.python_2_unicode_compatible
class KneserNeyProbDist(ProbDistI):
"""
Kneser-Ney estimate of a probability distribution. This is a version of
back-off that estimates how likely an n-gram is given that its (n-1)-gram
prefix has been seen in training. It extends the ProbDistI interface and
requires a trigram FreqDist instance to train on. Optionally, a discount
value different from the default of 0.75 can be specified.
"""
def __init__(self, freqdist, bins=None, discount=0.75):
"""
:param freqdist: The trigram frequency distribution upon which to base
the estimation
:type freqdist: FreqDist
:param bins: Included for compatibility with nltk.tag.hmm
:type bins: int or float
:param discount: The discount applied when retrieving counts of
trigrams
:type discount: float (preferred, but can be set to int)
"""
if not bins:
self._bins = freqdist.B()
else:
self._bins = bins
self._D = discount
# cache for probability calculation
self._cache = {}
# internal bigram and trigram frequency distributions
self._bigrams = defaultdict(int)
self._trigrams = freqdist
# helper dictionaries used to calculate probabilities
self._wordtypes_after = defaultdict(float)
self._trigrams_contain = defaultdict(float)
self._wordtypes_before = defaultdict(float)
for w0, w1, w2 in freqdist:
self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)]
self._wordtypes_after[(w0,w1)] += 1
self._trigrams_contain[w1] += 1
self._wordtypes_before[(w1,w2)] += 1
def prob(self, trigram):
# sample must be a triple
if len(trigram) != 3:
raise ValueError('Expected an iterable with 3 members.')
trigram = tuple(trigram)
w0, w1, w2 = trigram
if trigram in self._cache:
return self._cache[trigram]
else:
# if the sample trigram was seen during training
if trigram in self._trigrams:
prob = (self._trigrams[trigram]
- self.discount())/self._bigrams[(w0, w1)]
# else if the 'rougher' environment was seen during training
elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before:
aftr = self._wordtypes_after[(w0, w1)]
bfr = self._wordtypes_before[(w1, w2)]
# the probability left over from alphas
leftover_prob = ((aftr * self.discount())
/ self._bigrams[(w0, w1)])
# the beta (including normalization)
beta = bfr /(self._trigrams_contain[w1] - aftr)
prob = leftover_prob * beta
# else the sample was completely unseen during training
else:
prob = 0.0
self._cache[trigram] = prob
return prob
def discount(self):
"""
Return the value by which counts are discounted. By default set to 0.75.
:rtype: float
"""
return self._D
def set_discount(self, discount):
"""
Set the value by which counts are discounted to the value of discount.
:param discount: the new value to discount counts by
:type discount: float (preferred, but int possible)
:rtype: None
"""
self._D = discount
def samples(self):
return self._trigrams.keys()
def max(self):
return self._trigrams.max()
def __repr__(self):
'''
Return a string representation of this ProbDist
:rtype: str
'''
return '<KneserNeyProbDist based on {0} trigrams>'.format(self._trigrams.N())
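# A minimal usage sketch (illustrative only; assumes a FreqDist built over
# trigram tuples, e.g. with nltk.trigrams, from a hypothetical token list):
#
#     from nltk import trigrams
#     tokens = ['the', 'cat', 'sat', 'on', 'the', 'mat']
#     kn = KneserNeyProbDist(FreqDist(trigrams(tokens)))
#     kn.prob(('the', 'cat', 'sat'))   # seen trigram: discounted count ratio
#     kn.prob(('the', 'cat', 'ran'))   # unseen trigram: back-off estimate or 0.0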
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
for s in actual_pdist)
def entropy(pdist):
probs = (pdist.prob(s) for s in pdist.samples())
return -sum(p * math.log(p,2) for p in probs)
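# Example (illustrative): for a fair coin modelled by
# MLEProbDist(FreqDist(['H', 'T'])), the probabilities are 0.5 and 0.5, so
# entropy(pdist) = -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit.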
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
@compat.python_2_unicode_compatible
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition][word] += 1
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
FreqDist({'the': 3, 'dog': 2, 'not': 1})
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond][sample] += 1
def __reduce__(self):
kv_pairs = ((cond, self[cond]) for cond in self.conditions())
return (self.__class__, (), None, None, kv_pairs)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return list(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in compat.itervalues(self))
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed. '
'See http://matplotlib.org/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if "linewidth" not in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = "%s" % condition
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to tabulate
:type samples: list
:param conditions: The conditions to tabulate (default is all)
:type conditions: list
:param cumulative: A flag to specify whether the counts are cumulative (default: False)
:type cumulative: bool
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions()))
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len("%s" % c) for c in conditions)
print(' ' * condition_size, end=' ')
for s in samples:
print("%4s" % s, end=' ')
print()
for c in conditions:
print("%*s" % (condition_size, c), end=' ')
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print("%4d" % f, end=' ')
print()
# @total_ordering doesn't work here, since the class inherits from a builtin class
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<=", self, other)
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types("<", self, other)
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">=", self, other)
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist):
raise_unorderable_types(">", self, other)
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
@compat.python_2_unicode_compatible
class ConditionalProbDistI(dict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return list(self.keys())
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modeling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.corpus import brown
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000])
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> cpdist['passed'].max()
'VBD'
>>> cpdist['passed'].prob('VBD')
0.423...
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
self._probdist_factory = probdist_factory
self._factory_args = factory_args
self._factory_kw_args = factory_kw_args
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
def __missing__(self, key):
self[key] = self._probdist_factory(FreqDist(),
*self._factory_args,
**self._factory_kw_args)
return self[key]
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
self.update(probdist_dict)
def __missing__(self, key):
self[key] = DictionaryProbDist()
return self[key]
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
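# Example (illustrative): add_logs(-1.0, -1.0) returns 0.0, because
# log2(2**-1 + 2**-1) = log2(1.0) = 0.0; subtracting the common base keeps
# the intermediate powers of two close to 1 and so avoids overflow.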
def sum_logs(logs):
return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF)
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
def set_logprob(self, prob):
raise ValueError('%s is immutable' % self.__class__.__name__)
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1 + numsamples) // 2) +
random.randint(0, numsamples // 2))
fdist[y] += 1
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1 + numsamples) // 2 + 1):
for y in range(0, numsamples // 2 + 1):
fdist[x+y] += 1
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
distributions. This demonstration creates three frequency
distributions by sampling a random process with ``numsamples``
possible samples; each frequency distribution records
``numoutcomes`` outcomes. These three frequency distributions are
then used to build several probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes)))
print('='*9*(len(pdists)+2))
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))
print('-'*9*(len(pdists)+2))
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print(FORMATSTR % val)
# Print the totals for each column (should all be 1.0)
zvals = list(zip(*vals))
sums = [sum(val) for val in zvals[1:]]
print('-'*9*(len(pdists)+2))
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print(FORMATSTR % tuple(sums))
print('='*9*(len(pdists)+2))
# Display the distributions themselves, if they're short enough.
if len("%s" % fdist1) < 70:
print(' fdist1: %s' % fdist1)
print(' fdist2: %s' % fdist2)
print(' fdist3: %s' % fdist3)
print()
print('Generating:')
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
print()
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
sgt = SimpleGoodTuringProbDist(fd)
print('%18s %8s %14s' \
% ("word", "frequency", "SimpleGoodTuring"))
fd_keys_sorted=(key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True))
for key in fd_keys_sorted:
print('%18s %8d %14e' \
% (key, fd[key], sgt.prob(key)))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| apache-2.0 |
MoamerEncsConcordiaCa/tensorflow | tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py | 136 | 1696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
LinkHS/incubator-mxnet | example/speech_recognition/stt_utils.py | 44 | 5892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only `same` or `valid` are supported.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
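# Example (illustrative): with input_length=100, filter_size=11,
# border_mode='valid', stride=2 and dilation=1, the output length is
# ((100 - 11 + 1) + 2 - 1) // 2 = 91 // 2 = 45 frames.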
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
# matlabs specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
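# Example shape check (illustrative): for a 1-second signal sampled at
# 16 kHz with fft_length=256 and hop_length=128, the returned array has
# fft_length // 2 + 1 = 129 frequency rows and
# (16000 - 256) // 128 + 1 = 124 time columns.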
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
"the sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
if save_feature_as_csvfile:
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
| apache-2.0 |
joernhees/scikit-learn | sklearn/svm/tests/test_svm.py | 33 | 35916 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings, assert_raises
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import six
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_true(hasattr(clf, "coef_") == (k == 'linear'))
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
# check correct result when sample_weight is 1
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=unit_weight)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_),
np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
sample_weight=random_weight)
score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
sample_weight=random_weight)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [-1, -1, -1])
assert_equal(pred.dtype, np.dtype('intp'))
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(AttributeError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check shape of decision_function with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the samples from
    # one class.
    # We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that the score is better when class_weight='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='macro')
<= metrics.f1_score(y, y_pred_balanced,
average='macro'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_unicode_kernel():
# Test that a unicode kernel name does not cause a TypeError on clf.fit
if six.PY2:
# Test unicode (same as str on python3)
clf = svm.SVC(kernel=unicode('linear'))
clf.fit(X, Y)
# Test ascii bytes (str is bytes in python2)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
else:
# Test unicode (str is unicode in python3)
clf = svm.SVC(kernel=str('linear'))
clf.fit(X, Y)
# Test ascii bytes (same as str on python2)
clf = svm.SVC(kernel=bytes('linear', 'ascii'))
clf.fit(X, Y)
# Test default behavior on both versions
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
svm.LinearSVC(loss="l2").fit, X, y)
# LinearSVR
# loss l1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l1", "epsilon_insensitive", "loss='l1'",
"1.0"),
svm.LinearSVR(loss="l1").fit, X, y)
# loss l2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
    # Check that upper-case notation raises an error at _fit_liblinear,
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
assert_raise_message(ValueError, ("The combination of penalty='L2'"
" and loss='squared_hinge' is not supported"),
svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf_unitweight = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
random_weight = random_state.randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(random_state=0).\
fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
def test_ovr_decision_function():
# One point from each quadrant represents one class
X_train = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
y_train = [0, 1, 2, 3]
# First point is closer to the decision boundaries than the second point
base_points = np.array([[5, 5], [10, 10]])
# For all the quadrants (classes)
X_test = np.vstack((
base_points * [1, 1], # Q1
base_points * [-1, 1], # Q2
base_points * [-1, -1], # Q3
base_points * [1, -1] # Q4
))
y_test = [0] * 2 + [1] * 2 + [2] * 2 + [3] * 2
clf = svm.SVC(kernel='linear', decision_function_shape='ovr')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Test if the prediction is the same as y
assert_array_equal(y_pred, y_test)
deci_val = clf.decision_function(X_test)
# Assert that the predicted class has the maximum value
assert_array_equal(np.argmax(deci_val, axis=1), y_pred)
# Get decision value at test points for the predicted class
pred_class_deci_val = deci_val[range(8), y_pred].reshape((4, 2))
# Assert pred_class_deci_val > 0 here
assert_greater(np.min(pred_class_deci_val), 0.0)
# Test if the first point has lower decision value on every quadrant
# compared to the second point
assert_true(np.all(pred_class_deci_val[:, 0] < pred_class_deci_val[:, 1]))
| bsd-3-clause |
phanein/deepwalk | example_graphs/scoring.py | 1 | 5148 | #!/usr/bin/env python
"""scoring.py: Script that demonstrates the multi-label classification used."""
__author__ = "Bryan Perozzi"
import numpy
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import defaultdict
from gensim.models import Word2Vec, KeyedVectors
from six import iteritems
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from scipy.io import loadmat
from sklearn.utils import shuffle as skshuffle
from sklearn.preprocessing import MultiLabelBinarizer
class TopKRanker(OneVsRestClassifier):
def predict(self, X, top_k_list):
assert X.shape[0] == len(top_k_list)
probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
all_labels = []
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-k:]].tolist()
all_labels.append(labels)
return all_labels
def sparse2graph(x):
G = defaultdict(lambda: set())
cx = x.tocoo()
for i,j,v in zip(cx.row, cx.col, cx.data):
G[i].add(j)
return {str(k): [str(x) for x in v] for k,v in iteritems(G)}
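# A minimal sketch of how TopKRanker is used below, with hypothetical toy
# data (X_demo / y_demo are illustrative names, not part of the original
# script): predict exactly as many labels per node as the ground truth has.
def _topk_usage_sketch():
    X_demo = numpy.random.rand(4, 8)                         # 4 nodes, 8-dim embeddings
    y_demo = numpy.array([[1, 0], [0, 1], [1, 1], [0, 1]])   # multi-label indicator targets
    ranker = TopKRanker(LogisticRegression())
    ranker.fit(X_demo, y_demo)
    top_k_list = [int(row.sum()) for row in y_demo]
    return ranker.predict(X_demo, top_k_list)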
def main():
parser = ArgumentParser("scoring",
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument("--emb", required=True, help='Embeddings file')
parser.add_argument("--network", required=True,
help='A .mat file containing the adjacency matrix and node labels of the input network.')
parser.add_argument("--adj-matrix-name", default='network',
help='Variable name of the adjacency matrix inside the .mat file.')
parser.add_argument("--label-matrix-name", default='group',
help='Variable name of the labels matrix inside the .mat file.')
parser.add_argument("--num-shuffles", default=2, type=int, help='Number of shuffles.')
parser.add_argument("--all", default=False, action='store_true',
help='The embeddings are evaluated on all training percents from 10 to 90 when this flag is set to true. '
'By default, only training percents of 10, 50 and 90 are used.')
args = parser.parse_args()
# 0. Files
embeddings_file = args.emb
matfile = args.network
# 1. Load Embeddings
model = KeyedVectors.load_word2vec_format(embeddings_file, binary=False)
# 2. Load labels
mat = loadmat(matfile)
A = mat[args.adj_matrix_name]
graph = sparse2graph(A)
labels_matrix = mat[args.label_matrix_name]
labels_count = labels_matrix.shape[1]
mlb = MultiLabelBinarizer(range(labels_count))
# Map nodes to their features (note: assumes nodes are labeled as integers 1:N)
features_matrix = numpy.asarray([model[str(node)] for node in range(len(graph))])
    # 3. Shuffle, to create train/test groups
shuffles = []
for x in range(args.num_shuffles):
shuffles.append(skshuffle(features_matrix, labels_matrix))
    # 4. Score each train/test group
all_results = defaultdict(list)
if args.all:
training_percents = numpy.asarray(range(1, 10)) * .1
else:
training_percents = [0.1, 0.5, 0.9]
for train_percent in training_percents:
for shuf in shuffles:
X, y = shuf
training_size = int(train_percent * X.shape[0])
X_train = X[:training_size, :]
y_train_ = y[:training_size]
y_train = [[] for x in range(y_train_.shape[0])]
cy = y_train_.tocoo()
for i, j in zip(cy.row, cy.col):
y_train[i].append(j)
assert sum(len(l) for l in y_train) == y_train_.nnz
X_test = X[training_size:, :]
y_test_ = y[training_size:]
y_test = [[] for _ in range(y_test_.shape[0])]
cy = y_test_.tocoo()
for i, j in zip(cy.row, cy.col):
y_test[i].append(j)
clf = TopKRanker(LogisticRegression())
clf.fit(X_train, y_train_)
# find out how many labels should be predicted
top_k_list = [len(l) for l in y_test]
preds = clf.predict(X_test, top_k_list)
results = {}
averages = ["micro", "macro"]
for average in averages:
results[average] = f1_score(mlb.fit_transform(y_test), mlb.fit_transform(preds), average=average)
all_results[train_percent].append(results)
print ('Results, using embeddings of dimensionality', X.shape[1])
print ('-------------------')
for train_percent in sorted(all_results.keys()):
print ('Train percent:', train_percent)
for index, result in enumerate(all_results[train_percent]):
print ('Shuffle #%d: ' % (index + 1), result)
avg_score = defaultdict(float)
for score_dict in all_results[train_percent]:
for metric, score in iteritems(score_dict):
avg_score[metric] += score
for metric in avg_score:
avg_score[metric] /= len(all_results[train_percent])
print ('Average score:', dict(avg_score))
print ('-------------------')
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
mwrightevent38/MissionPlanner | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
Width of the waveform. Default is 1.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
    >>> import matplotlib.pyplot as plt
    >>> from scipy import signal
    >>> x = np.linspace(0, 20*np.pi, 500)
    >>> plt.plot(x, signal.sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
Duty cycle.
Returns
-------
y : array_like
The output square wave.
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3,-1)
return y
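# A short usage sketch for `square` (helper name is illustrative, not part
# of the original module), assuming matplotlib is available for plotting.
def _demo_square():
    import numpy as np
    import matplotlib.pyplot as plt
    t = np.linspace(0, 1, 500, endpoint=False)
    # 5 Hz square wave with a 30% duty cycle
    plt.plot(t, square(2 * np.pi * 5 * t, duty=0.3))
    plt.ylim(-2, 2)
    plt.show()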
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
    bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
    # pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
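# A usage sketch for `gausspulse` (helper name is illustrative, not part of
# the original module): a 5 kHz pulse, its envelope, and the -60 dB cutoff time.
def _demo_gausspulse():
    import numpy as np
    t = np.linspace(-1e-3, 1e-3, 201)
    real, envelope = gausspulse(t, fc=5e3, retenv=True)
    t_cutoff = gausspulse('cutoff', fc=5e3, tpr=-60)
    return real, envelope, t_cutoff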
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
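# A worked example of the linear sweep described above (helper name is
# illustrative only): the instantaneous frequency runs from f0=6 Hz at t=0
# down to f1=1 Hz at t1=10 s.
def _demo_linear_chirp():
    import numpy as np
    t = np.linspace(0, 10, 5001)
    return t, chirp(t, f0=6, t1=10, f1=1, method='linear')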
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
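# A sketch tying the formula above to code (helper name is illustrative):
# with poly1d([0.025, -0.36, 1.25, 2.0]) the instantaneous frequency is
# f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2.0.
def _demo_sweep_poly():
    import numpy as np
    p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    t = np.linspace(0, 10, 5001)
    return t, sweep_poly(t, p)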
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
| gpl-3.0 |
shiruilu/CAPE | skin_detection/appendixa_skin_detect.py | 1 | 3360 | """
Created on May 27, 2015
@author: shiruilu
skin detect from Appendix A of CAPE
"""
import os
import sys
source_dirs = ['cape_util']
for d in source_dirs:
sys.path.insert( 0, os.path.join(os.getcwd(), '../'+d) )
import cape_util
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy.signal import correlate2d
import ipdb
IMG_DIR = '../resources/images/'
BM_DIR = './benchmarks/'
def ellipse_test(A, B, bound=1.0, prob=1.0, return_prob=False):
    '''test whether a CIELab color falls inside a given ellipse'''
elpse = (1.0*(A-143)/6.5)**2 + (1.0*(B-148)/12.0)**2
if not return_prob:
return elpse < (1.0*bound/prob)
else:
return np.minimum(1.0/(elpse+1e-6), 1.0)
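# A small sketch of the ellipse test above (values are illustrative, not
# taken from the paper): chroma values near the centre (A=143, B=148)
# pass, values far away do not.
def _demo_ellipse_test():
    A = np.array([143.0, 170.0])
    B = np.array([148.0, 200.0])
    inside = ellipse_test(A, B)                    # -> [True, False]
    probs = ellipse_test(A, B, return_prob=True)   # -> [~1.0, ~0.03]
    return inside, probs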
def check_neighbor(mask):
neighbor = np.ones([4,4], dtype='float')
return correlate2d(mask.astype('float')/255.0, neighbor
, mode='same', boundary='wrap') >= 1
def HSV_threshold(H, S):
    # S: 0.25~0.75 (scaled to 0-255), H < 0.095 (scaled to 0-180)
return (S>=0.25*255) & (S<=0.75*255) & (H<0.095*180)
def skin_detect(img):
"""img: in BGR mode"""
    # initialize an all-zero (non-skin) mask
skinMask = np.zeros(img.shape[0:2], img.dtype)
img_LAB = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype('float')
img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
# ellipse and HSV_test
skinMask[ ellipse_test(img_LAB[...,1], img_LAB[...,2], bound=1.0, prob=0.6)
& HSV_threshold(img_HSV[...,0], img_HSV[...,1]) ] \
= 255
    # relaxed ellipse test, guaranteed by the skin neighborhood check
skinMask[ (skinMask ==0)
& ellipse_test(img_LAB[...,1], img_LAB[...,2]
, bound=1.25, prob=0.6)
& check_neighbor(skinMask)] = 255
    # filling holes: morphological closing on skinMask
# http://stackoverflow.com/a/10317883/2729100
_h,_w = img.shape[0:2]
_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(_h/16,_w/16))
skinMask_closed = cv2.morphologyEx(skinMask,cv2.MORPH_CLOSE,_kernel)
# skinMask_closed = skinMask
cape_util.display(np.hstack([skinMask, skinMask_closed]), name='skin mask closing before after', mode='gray')
# initialization, can't remove, otherwise mask==0 area will be random
skin = 255*np.ones(img.shape, img.dtype)
skin = cv2.bitwise_and(img, img, mask=skinMask_closed)
return skin, (skinMask_closed/255).astype(bool)
def skin_prob_map(img):
"""
Keyword Arguments:
img -- np.uint8 (m,n,3) BGR
"""
    # initialize an all-zero probability map
skin_prob_map = np.zeros(img.shape[0:2], dtype='float')
img_LAB = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype('float')
img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype('float')
skin_prob_map = ellipse_test(img_LAB[...,1], img_LAB[...,2], bound=1.0, prob=1.0, return_prob=True)
skin_prob_map[HSV_threshold(img_HSV[...,0], img_HSV[...,1]) ] = 0.0
skin_prob_map[ (skin_prob_map < 1.0)
& ellipse_test(img_LAB[...,1], img_LAB[...,2]
, bound=1.25, prob=0.9)
& check_neighbor(255*(skin_prob_map==1.0).astype('uint8'))] = 1.0
return skin_prob_map
def main():
img = cv2.imread(IMG_DIR+'teaser_face.png')
skin, _ = skin_detect( img )
plt.imshow( cv2.cvtColor(np.hstack([img, skin]), cv2.COLOR_BGR2RGB) )
plt.show()
return 0
if __name__ == '__main__':
main() | mit |
DreamLiMu/ML_Python | les3/treePlotter.py | 1 | 3342 | #-*-coding:utf-8-*-
##################
# This block works around printing Chinese output raising:
# UnicodeEncodeError: 'ascii' codec can't encode characters in position 32-34: ordinal not in range(128)
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
#####################
import matplotlib.pyplot as plt
import numpy as np
## (the next three lines) define the text box and arrow formats
decisionNode = dict(boxstyle = "sawtooth",fc = "0.8")
leafNode = dict(boxstyle="round4", fc = "0.8")
arrow_args = dict(arrowstyle = "<-")
## (the next two lines) draw node annotations with arrows
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt,xycoords="axes fraction",xytext=centerPt,textcoords="axes fraction",va="center",ha="center",bbox=nodeType,arrowprops=arrow_args)
## fill in text information between parent and child nodes
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString, va="center",ha="center",rotation=30)
def plotTree(myTree, parentPt, nodeTxt):
    ## compute the width and height of the tree
numLeafs = getNumLeafs(myTree)
depth = getTreeDepth(myTree)
firstStr = myTree.keys()[0]
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
    ## mark the child node's attribute value
plotMidText(cntrPt,parentPt,nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
    ## decrease the y offset
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
plotTree(secondDict[key],cntrPt,str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt,leafNode)
plotMidText((plotTree.xOff,plotTree.yOff),cntrPt,str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
def createPlot(inTree):
fig = plt.figure(1,facecolor = "white")
fig.clf()
axprops = dict(xticks=[],yticks=[])
createPlot.ax1 = plt.subplot(111,frameon=False, **axprops)
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW
plotTree.yOff = 1.0
plotTree(inTree, (0.5,1.0),'')
plt.show()
def getNumLeafs(myTree):
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        ## (the next three lines) test whether the node's data type is a dict
if type(secondDict[key]).__name__ == 'dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
thisDepth = 1 + getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
listOfTrees = [{'no surfacing':{0:'no',1:{'flippers':{0:'no',1:'yes'}}}},{'no surfacing':{0:'no',1:{'flippers':{0:{'head':{0:'no',1:'yes'}},1:'no'}}}}]
return listOfTrees[i]
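## A minimal usage sketch (illustrative, not part of the original lesson
## code): build one of the hard-coded trees, measure it and draw it.
def demoPlot():
    myTree = retrieveTree(0)
    numLeafs = getNumLeafs(myTree)   # 3 leaf nodes for tree 0
    depth = getTreeDepth(myTree)     # depth 2 for tree 0
    createPlot(myTree)
    return numLeafs, depth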
| gpl-2.0 |
enavarro222/PiPa | emoncms_client.py | 1 | 7298 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys
import logging
from datetime import datetime, timedelta
import time
import calendar
import requests
import pandas as pd
class EmoncmsClient(object):
""" Get data from emoncms API
"""
def __init__(self, url, apikey=None):
self._logger = logging.getLogger("EmoncmsClient")
self.url = url
self.apikey = apikey
self._tres = {}
# compute timeres
res = self._get_json(self.url + "/feed/list.json")
for feed in res:
fid = int(feed["id"])
self._tres[fid] = self._compute_time_res(fid)
def _get_json(self, url, params=None):
if params is None:
params = self._default_params()
results = requests.get(url, params=params)
# error if not 200 for HTTP status
results.raise_for_status()
if results.text == "false":
raise RuntimeError("Impossible to get the data (%s)" % results.url)
self._logger.debug("query: %s" % results.url)
return results.json()
def _default_params(self):
params = {}
if self.apikey is not None:
params["apikey"] = self.apikey
return params
def feeds(self):
""" Get data about all available feeds
"""
res = self._get_json(self.url + "/feed/list.json")
for feed in res:
feed[u"id"] = int(feed["id"])
feed[u"tres"] = self.time_res(feed["id"])
feed[u"date"] = datetime.fromtimestamp(feed["time"])
return res
def get_value(self, fid):
""" return the last value of a field
"""
all_data = self.feeds()
all_data = {feed["id"]: feed for feed in all_data}
if fid not in all_data:
raise ValueError("Feed not found")
return all_data[fid]
def time_res(self, fid):
return self._tres.get(fid, None)
def _compute_time_res(self, fid):
""" Compute the minimal time resolution (seconds)
"""
last_date = self.get_value(fid)["date"]
nb_data = 100
ok = False
while not ok:
# get data 20 mins ago
start_date = last_date - timedelta(0, 20*60)
data = self.get_data(fid, delta_sec=13, start_date=start_date, nb_data=nb_data)
if len(data) >= 2:
ok = True
tres = (data.index[1]-data.index[0]).total_seconds()
else:
nb_data *= 2
if nb_data > 1000:
raise RuntimeError("Impossible to get the time res of the feed, no data ?")
return tres
def _check_interval(self, delta_sec, start_date=None, end_date=None, nb_data=None):
""" check inputs, set defaults
>>> emon = EmoncmsClient("http://localhost/")
>>> # if not given end_date will be now
>>> delta_sec, start_date, nb_data = emon._check_interval(60)
>>> nb_data # default value
100
>>> (datetime.now() - start_date).total_seconds() - delta_sec*nb_data < 1
True
>>> # else start_time is computed from other params
>>> delta_sec, start_date, nb_data = emon._check_interval(60*60, end_date=datetime(2015, 2, 11), nb_data=48)
>>> start_date.isoformat()
'2015-02-09T00:00:00'
"""
default_nb_data = 100
if start_date is not None and end_date is not None:
if nb_data is not None:
raise ValueError("End and start date are given , you can not set nb_data")
nb_data = (end_date - start_date).total_seconds() / delta_sec
if start_date is None:
if end_date is None:
self._logger.info("Set end_time to now (default)")
end_date = datetime.now()
if nb_data is None:
self._logger.info("Set nb_data to 100 (default)")
nb_data = default_nb_data
start_date = end_date - timedelta(0, nb_data*delta_sec)
return delta_sec, start_date, nb_data
def get_data(self, fid, delta_sec, start_date=None, end_date=None, nb_data=None):
"""
:param fid: feed ID to get
"""
# search for feed
for feed in self.feeds():
if fid == feed['id']:
feed_name = feed['name']
break
else:
raise ValueError("Field %s is unknow" % fid)
delta_sec, start_date, nb_data = self._check_interval(
delta_sec=delta_sec,
start_date=start_date,
end_date=end_date,
nb_data=nb_data
)
## make the requests
t_start = time.mktime( start_date.timetuple() )*1000
#t_start = calendar.timegm(start_date.timetuple())*1000
        # get the data
data_brut = []
nb_read = 0
nb_each_request = 800
while nb_read < nb_data:
            # choose how many points to request in this chunk
nb_to_read = min(nb_each_request, nb_data-nb_read)
t_end = t_start + nb_to_read*delta_sec*1000
            # print(int(t_start), int(t_end))
query = self.url + "/feed/average.json"
params = self._default_params()
params["id"] = fid
params["start"] = "%d" % t_start
params["end"] = "%d" % t_end
params["interval"] = delta_sec
data_brut += self._get_json(query, params)
# if len(data_brut) < nb_to_read:
# has_min_res = ""
# if len(data_brut) >= 2:
# min_res = (data_brut[1][0] - data_brut[0][0])/1000
# has_min_res = " (min res: %ss)" % min_res
# raise ValueError("Get only %d data (/%d) too small temporal resolution%s" % (len(data_brut), nb_to_read, has_min_res))
nb_read += nb_to_read
t_start = data_brut[-1][0]
## convert it to panda
dates, vals = zip(*data_brut)
dates = [datetime.fromtimestamp(date/1000) for date in dates]
ts = pd.Series(vals, index=dates, name=feed_name)
return ts
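# A hedged usage sketch for EmoncmsClient.get_data (not part of the original
# module); the URL, API key and feed id below are placeholders for a
# reachable emoncms instance.
def _example_fetch():
    client = EmoncmsClient("http://localhost/emoncms", apikey="YOUR_API_KEY")
    # last ~2 hours of feed 1, averaged over 5-minute buckets
    ts = client.get_data(1, delta_sec=300, nb_data=24)
    return ts.describe()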
def main():
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url", action='store', type=str, help="emoncms root url")
parser.add_argument("-k", "--api-key",
action='store', type=str,
help="API key (get public data if not given)", default=None
)
parser.add_argument("-f", "--feed-id", action='store', type=int, help="Feed ID")
args = parser.parse_args()
    # Build the emoncms client object
    emon_src = EmoncmsClient(args.url, apikey=args.api_key)
## list all feed
from pprint import pprint
print("#"*5 + " ALL FEEDS " + "#"*5)
feeds = emon_src.feeds()
for feed in feeds:
print("* id:{id:<3} name:{name:<16} value:{value:<10} last update:{date}".format(**feed))
## Plot one feed
if args.feed_id:
print("#"*5 + " PLOT " + "#"*5)
start_date = datetime(2014, 9, 10)
delta_sec = 60*5
        ts = emon_src.get_data(args.feed_id, delta_sec, start_date=start_date, nb_data=10000)
ts.plot()
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 |
TheAlgorithms/Python | scheduling/shortest_job_first.py | 1 | 4724 | """
Shortest job remaining first (preemptive shortest-job-first scheduling).
Please note the arrival time and burst time of each process.
Please use spaces to separate the times entered.
"""
from typing import List
import pandas as pd
def calculate_waitingtime(
arrival_time: List[int], burst_time: List[int], no_of_processes: int
) -> List[int]:
"""
Calculate the waiting time of each processes
Return: List of waiting times.
>>> calculate_waitingtime([1,2,3,4],[3,3,5,1],4)
[0, 3, 5, 0]
>>> calculate_waitingtime([1,2,3],[2,5,1],3)
[0, 2, 0]
>>> calculate_waitingtime([2,3],[5,1],2)
[1, 0]
"""
remaining_time = [0] * no_of_processes
waiting_time = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(no_of_processes):
remaining_time[i] = burst_time[i]
complete = 0
increment_time = 0
minm = 999999999
short = 0
check = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(no_of_processes):
if arrival_time[j] <= increment_time:
if remaining_time[j] > 0:
if remaining_time[j] < minm:
minm = remaining_time[j]
short = j
check = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
minm = remaining_time[short]
if minm == 0:
minm = 999999999
if remaining_time[short] == 0:
complete += 1
check = False
# Find finish time of current process
finish_time = increment_time + 1
# Calculate waiting time
finar = finish_time - arrival_time[short]
waiting_time[short] = finar - burst_time[short]
if waiting_time[short] < 0:
waiting_time[short] = 0
# Increment time
increment_time += 1
return waiting_time
def calculate_turnaroundtime(
burst_time: List[int], no_of_processes: int, waiting_time: List[int]
) -> List[int]:
"""
Calculate the turn around time of each Processes
Return: list of turn around times.
>>> calculate_turnaroundtime([3,3,5,1], 4, [0,3,5,0])
[3, 6, 10, 1]
>>> calculate_turnaroundtime([3,3], 2, [0,3])
[3, 6]
>>> calculate_turnaroundtime([8,10,1], 3, [1,0,3])
[9, 10, 4]
"""
turn_around_time = [0] * no_of_processes
for i in range(no_of_processes):
turn_around_time[i] = burst_time[i] + waiting_time[i]
return turn_around_time
def calculate_average_times(
waiting_time: List[int], turn_around_time: List[int], no_of_processes: int
) -> None:
"""
This function calculates the average of the waiting & turnaround times
Prints: Average Waiting time & Average Turn Around Time
>>> calculate_average_times([0,3,5,0],[3,6,10,1],4)
Average waiting time = 2.00000
Average turn around time = 5.0
>>> calculate_average_times([2,3],[3,6],2)
Average waiting time = 2.50000
Average turn around time = 4.5
>>> calculate_average_times([10,4,3],[2,7,6],3)
Average waiting time = 5.66667
Average turn around time = 5.0
"""
total_waiting_time = 0
total_turn_around_time = 0
for i in range(no_of_processes):
total_waiting_time = total_waiting_time + waiting_time[i]
total_turn_around_time = total_turn_around_time + turn_around_time[i]
print("Average waiting time = %.5f" % (total_waiting_time / no_of_processes))
print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("Enter how many process you want to analyze")
no_of_processes = int(input())
burst_time = [0] * no_of_processes
arrival_time = [0] * no_of_processes
processes = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and brust time for process:--" + str(i + 1))
arrival_time[i], burst_time[i] = map(int, input().split())
waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
bt = burst_time
n = no_of_processes
wt = waiting_time
turn_around_time = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| mit |